Diffstat (limited to 'llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll')
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll | 492
1 file changed, 492 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
index 4ab2aa7c1681..d582ba8ada92 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -496,6 +496,186 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vnclipu.wv v25, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vnclipu.wv v25, v8, v10
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vnclipu.wv v26, v8, v12
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vnclipu.wv v28, v8, v16
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
   <vscale x 1 x i16>,
   i32,
@@ -991,6 +1171,186 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+  <vscale x 1 x i64>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vnclipu.wx v25, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i64>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+  <vscale x 2 x i64>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vnclipu.wx v25, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i64>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+  <vscale x 4 x i64>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vnclipu.wx v26, v8, a0
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i64>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+  <vscale x 8 x i64>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vnclipu.wx v28, v8, a0
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i64>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
 define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -1353,3 +1713,135 @@ entry:
 
   ret <vscale x 16 x i16> %a
 }
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vnclipu.wi v25, v8, 9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
+    <vscale x 1 x i64> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i64> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vnclipu.wi v25, v8, 9
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
+    <vscale x 2 x i64> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i64> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vnclipu.wi v26, v8, 9
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
+    <vscale x 4 x i64> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i64> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vnclipu.wi v28, v8, 9
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
+    <vscale x 8 x i64> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i64> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
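
Background note (not part of the committed file): vnclipu is the RVV narrowing fixed-point clip. It right-shifts each 2*SEW-wide source element with rounding and saturates the result to an unsigned SEW-wide value; these tests pin down the vsetvli and register sequences expected for the .wv (vector shift amount), .wx (scalar), and .wi (immediate) forms at the new e32 result width. A minimal self-contained sketch of one such case follows; the RUN line is an assumption about this file's driver (the hunks above do not show it), modeled on the llc invocations RVV tests used at the time, and @vnclipu_example is a hypothetical name.

; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s

; Same declaration the diff adds: the source element type is 2*SEW (i64),
; the result is SEW (i32), and the trailing i32 is the vector length (vl).
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
  <vscale x 1 x i64>,
  i32,
  i32);

; Shift each 64-bit element right by %shift with rounding, saturate to
; unsigned 32 bits, and return the narrowed vector.
define <vscale x 1 x i32> @vnclipu_example(<vscale x 1 x i64> %x, i32 %shift, i32 %vl) nounwind {
; CHECK-LABEL: vnclipu_example:
; CHECK: vnclipu.wx
  %r = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
    <vscale x 1 x i64> %x,
    i32 %shift,
    i32 %vl)
  ret <vscale x 1 x i32> %r
}

Because the shift amount here is a scalar register operand, llc should select the vnclipu.wx encoding, mirroring the _vx test cases in the diff.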