Diffstat (limited to 'llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll')
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll  423
1 file changed, 423 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
index 073998351063..c3d4943c3afb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmulhsu.vv v8, v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmulhsu.vv v8, v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmulhsu.vv v8, v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vmulhsu.vv v8, v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
+; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v25, v25, a1
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vsll.vx v26, v26, a1
+; CHECK-NEXT: vsrl.vx v26, v26, a1
+; CHECK-NEXT: vor.vv v25, v26, v25
+; CHECK-NEXT: vmulhsu.vv v8, v8, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v25, v25, a1
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vsll.vx v26, v26, a1
+; CHECK-NEXT: vsrl.vx v26, v26, a1
+; CHECK-NEXT: vor.vv v25, v26, v25
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT: vmulhsu.vv v8, v9, v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v26, v26, a1
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vsll.vx v28, v28, a1
+; CHECK-NEXT: vsrl.vx v28, v28, a1
+; CHECK-NEXT: vor.vv v26, v28, v26
+; CHECK-NEXT: vmulhsu.vv v8, v8, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v26, v26, a1
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vsll.vx v28, v28, a1
+; CHECK-NEXT: vsrl.vx v28, v28, a1
+; CHECK-NEXT: vor.vv v26, v28, v26
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT: vmulhsu.vv v8, v10, v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v28, v28, a1
+; CHECK-NEXT: vmv.v.x v12, a0
+; CHECK-NEXT: vsll.vx v12, v12, a1
+; CHECK-NEXT: vsrl.vx v12, v12, a1
+; CHECK-NEXT: vor.vv v28, v12, v28
+; CHECK-NEXT: vmulhsu.vv v8, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v28, v28, a1
+; CHECK-NEXT: vmv.v.x v16, a0
+; CHECK-NEXT: vsll.vx v16, v16, a1
+; CHECK-NEXT: vsrl.vx v16, v16, a1
+; CHECK-NEXT: vor.vv v28, v16, v28
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT: vmulhsu.vv v8, v12, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ i64,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v16, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v16, v16, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v16, v24, v16
+; CHECK-NEXT: vmulhsu.vv v8, v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrrs a3, vlenb, zero
+; CHECK-NEXT: sub sp, sp, a3
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v24, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v0, v24, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v24, v24, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
+; CHECK-NEXT: csrrs a0, vlenb, zero
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}