Diffstat (limited to 'clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c')
-rw-r--r-- | clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c | 2599
1 file changed, 2599 insertions, 0 deletions
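
For orientation, the new test covers the vmsle (signed) and vmsleu (unsigned) "set-if-less-than-or-equal" comparison intrinsics from riscv_vector.h, in both vector-vector (_vv_) and vector-scalar (_vx_) forms, across the element-width and LMUL combinations. A minimal sketch of the pattern each autogenerated check pair asserts, assuming a clang built with -target-feature +experimental-v as in the RUN lines below (the helper names here are illustrative, not part of the patch):

#include <riscv_vector.h>

// Each intrinsic returns a mask (vboolN_t) and is expected to lower to a
// single @llvm.riscv.vmsle(u).* call, whose VL operand is i32 or i64
// depending on the target's XLEN.
vbool32_t leq_signed(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vmsle_vv_i32m1_b32(op1, op2, vl);   // vector-vector form
}

vbool32_t leq_unsigned(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vmsleu_vx_u32m1_b32(op1, op2, vl);  // vector-scalar form
}
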
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
new file mode 100644
index 000000000000..1959f490357a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
@@ -0,0 +1,2599 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vmsle_vv_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vmsle_vx_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vmsle_vv_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1>
@llvm.riscv.vmsle.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8mf4_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsle_vv_i8mf2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8mf2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8m1_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsle_vv_i8m1_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8m1_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m1_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8m2_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale 
x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsle_vv_i8m2_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8m2_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m2_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8m4_b2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsle_vv_i8m4_b2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8m4_b2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m4_b2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8m8_b1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsle_vv_i8m8_b1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8m8_b1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m8_b1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf4_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vmsle_vv_i16mf4_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf4_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16mf4_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vmsle_vv_i16mf2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16mf2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16m1_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsle_vv_i16m1_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16m1_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m1_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16m2_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsle_vv_i16m2_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16m2_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m2_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16m4_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsle_vv_i16m4_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16m4_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], 
i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m4_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16m8_b2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsle_vv_i16m8_b2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16m8_b2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m8_b2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vmsle_vv_i32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale 
x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsle_vv_i32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsle_vv_i32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsle_vv_i32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmsle_vx_i32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsle_vv_i32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsle_vv_i64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { + return 
vmsle_vx_i64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsle_vv_i64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { + return vmsle_vx_i64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmsle_vv_i64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { + return vmsle_vx_i64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] 
+// +vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsle_vv_i64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { + return vmsle_vx_i64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf8_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vmsleu_vv_u8mf8_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf8_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8mf8_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf4_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vmsleu_vv_u8mf4_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf4_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8mf4_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vmsleu_vv_u8mf2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8mf2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m1_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmsleu_vv_u8m1_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m1_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8m1_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m2_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i64(<vscale 
x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmsleu_vv_u8m2_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m2_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8m2_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m4_b2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmsleu_vv_u8m4_b2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m4_b2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8m4_b2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m8_b1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmsleu_vv_u8m8_b1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m8_b1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8m8_b1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf4_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vmsleu_vv_u16mf4_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf4_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16mf4_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vmsleu_vv_u16mf2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16mf2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m1_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x 
i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vmsleu_vv_u16m1_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m1_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16m1_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m2_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmsleu_vv_u16m2_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m2_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16m2_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m4_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmsleu_vv_u16m4_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m4_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> 
@llvm.riscv.vmsleu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16m4_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m8_b2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmsleu_vv_u16m8_b2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m8_b2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmsleu_vx_u16m8_b2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vmsleu_vv_u32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32mf2_b64(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vmsleu_vv_u32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vmsleu_vv_u32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vmsleu_vv_u32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// 
+vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmsleu_vv_u32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmsleu_vv_u32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmsleu_vx_u32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, + size_t vl) { + return vmsleu_vv_u64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i64(<vscale x 1 x i64> 
[[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmsleu_vx_u64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, + size_t vl) { + return vmsleu_vv_u64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmsleu_vx_u64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, + size_t vl) { + return vmsleu_vv_u64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmsleu_vx_u64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmsleu_vv_u64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmsleu_vx_u64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf8_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmsle_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf8_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf4_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> 
[[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmsle_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf4_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmsle_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8m1_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmsle_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8m1_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8m2_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmsle_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8m2_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8m4_b2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmsle_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8m4_b2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i8m8_b1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmsle_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i8m8_b1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]] +// +vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, + vint8m8_t op1, int8_t op2, size_t vl) { + return vmsle_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf4_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 
x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vmsle_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf4_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vmsle_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vint16mf2_t op1, 
int16_t op2, size_t vl) { + return vmsle_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16m1_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmsle_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16m1_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16m2_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmsle_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16m2_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale 
x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16m4_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmsle_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16m4_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i16m8_b2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmsle_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i16m8_b2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]] +// +vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return vmsle_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vmsle_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmsle_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i32(<vscale x 2 x i1> 
[[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmsle_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmsle_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vmsle_vx_i32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmsle_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]] +// +vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vmsle_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> 
[[TMP0]] +// +vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmsle_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vmsle_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmsle_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vint64m2_t op1, int64_t op2, size_t vl) { + return vmsle_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> 
@llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmsle_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]] +// +vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vmsle_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vv_i64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmsle_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsle_vx_i64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]] +// +vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vmsle_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf8_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vmsleu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf8_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]] +// +vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf4_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vmsleu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf4_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]] +// +vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmsleu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x 
i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                     vuint8mf2_t op1, vuint8mf2_t op2,
+                                     size_t vl) {
+  return vmsleu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                     vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vmsleu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                  vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vmsleu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                  vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vmsleu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                  vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vmsleu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                  vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vmsleu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+                                  vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vmsleu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+                                  vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vmsleu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+                                  vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vmsleu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+                                  vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vmsleu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                      vuint16mf4_t op1, vuint16mf4_t op2,
+                                      size_t vl) {
+  return vmsleu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                      vuint16mf4_t op1, uint16_t op2,
+                                      size_t vl) {
+  return vmsleu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                      vuint16mf2_t op1, vuint16mf2_t op2,
+                                      size_t vl) {
+  return vmsleu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                      vuint16mf2_t op1, uint16_t op2,
+                                      size_t vl) {
+  return vmsleu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                     vuint16m1_t op1, vuint16m1_t op2,
+                                     size_t vl) {
+  return vmsleu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                     vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vmsleu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                   vuint16m2_t op1, vuint16m2_t op2,
+                                   size_t vl) {
+  return vmsleu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                   vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vmsleu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                   vuint16m4_t op1, vuint16m4_t op2,
+                                   size_t vl) {
+  return vmsleu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                   vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vmsleu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+                                   vuint16m8_t op1, vuint16m8_t op2,
+                                   size_t vl) {
+  return vmsleu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+                                   vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vmsleu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                      vuint32mf2_t op1, vuint32mf2_t op2,
+                                      size_t vl) {
+  return vmsleu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                      vuint32mf2_t op1, uint32_t op2,
+                                      size_t vl) {
+  return vmsleu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                     vuint32m1_t op1, vuint32m1_t op2,
+                                     size_t vl) {
+  return vmsleu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                     vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vmsleu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                     vuint32m2_t op1, vuint32m2_t op2,
+                                     size_t vl) {
+  return vmsleu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                     vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vmsleu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                   vuint32m4_t op1, vuint32m4_t op2,
+                                   size_t vl) {
+  return vmsleu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                   vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vmsleu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                   vuint32m8_t op1, vuint32m8_t op2,
+                                   size_t vl) {
+  return vmsleu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                   vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vmsleu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                     vuint64m1_t op1, vuint64m1_t op2,
+                                     size_t vl) {
+  return vmsleu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                     vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vmsleu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                     vuint64m2_t op1, vuint64m2_t op2,
+                                     size_t vl) {
+  return vmsleu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                     vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vmsleu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                     vuint64m4_t op1, vuint64m4_t op2,
+                                     size_t vl) {
+  return vmsleu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                     vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vmsleu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                   vuint64m8_t op1, vuint64m8_t op2,
+                                   size_t vl) {
+  return vmsleu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vmsleu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
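Usage note (not part of the test file above): the masked vmsleu intrinsics compute an unsigned element-wise op1[i] <= op2 compare into a mask register, updating only lanes where `mask` is set and passing `maskedoff` through elsewhere. Below is a minimal strip-mined sketch of the unmasked vx form in ordinary code, assuming the same pre-ratification (v0.10-era) intrinsics API this test targets; vsetvl_e8m1, vle8_v_u8m1, and vpopc_m_b8 are that era's names and are assumptions here, not calls exercised by this file.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Count how many bytes in src[0..n) are <= limit (unsigned compare).
size_t count_le(const uint8_t *src, size_t n, uint8_t limit) {
  size_t total = 0;
  while (n > 0) {
    size_t vl = vsetvl_e8m1(n);                    // elements this iteration
    vuint8m1_t v = vle8_v_u8m1(src, vl);           // unit-stride load
    vbool8_t le = vmsleu_vx_u8m1_b8(v, limit, vl); // v[i] <= limit -> mask
    total += vpopc_m_b8(le, vl);                   // count set mask bits
    src += vl;
    n -= vl;
  }
  return total;
}

The masked _m variants tested above follow the same pattern but take the (mask, maskedoff) pair first, which is useful when the compare itself must be restricted to lanes selected by an earlier condition.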