Diffstat (limited to 'llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll')
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll | 1278
1 file changed, 1119 insertions(+), 159 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
index 5b4e790ffb7a..3a0d1302f9d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
@@ -1,6 +1,822 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei64.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> *%0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
<vscale x 1 x i32>*,
<vscale x 1 x i16>,
@@ -241,6 +1057,198 @@ entry:
ret <vscale x 16 x i32> %a
}
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei16.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
<vscale x 1 x i32>*,
<vscale x 1 x i8>,
@@ -481,242 +1489,194 @@ entry:
ret <vscale x 16 x i32> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
i32);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
- <vscale x 1 x i32> *%0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
i32 %3)
- ret <vscale x 1 x i32> %a
+ ret <vscale x 1 x i64> %a
}
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
+declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32:
+define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v9, (a0), v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> *%0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
+ %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> *%0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
- <vscale x 2 x i32> *%0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
-; CHECK-NEXT: jalr zero, 0(ra)
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> *%0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i32> %a
+ ret <vscale x 1 x i64> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
i32);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
- <vscale x 4 x i32> *%0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
i32 %3)
- ret <vscale x 4 x i32> %a
+ ret <vscale x 2 x i64> %a
}
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
+declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
i32);
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32:
+define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v10, (a0), v8, v10, v0.t
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v10, (a0), v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> *%0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
+ %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> *%0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
i32 %4)
- ret <vscale x 4 x i32> %a
+ ret <vscale x 2 x i64> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
i32);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
- <vscale x 8 x i32> *%0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
i32 %3)
- ret <vscale x 8 x i32> %a
+ ret <vscale x 4 x i64> %a
}
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
+declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
i32);
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32:
+define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v12, (a0), v8, v12, v0.t
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v12, (a0), v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> *%0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
+ %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> *%0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
i32 %4)
- ret <vscale x 8 x i32> %a
+ ret <vscale x 4 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
i32 %3)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
+declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
i32);
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32:
+define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
-; CHECK-NEXT: vamomaxuei32.v v16, (a0), v8, v16, v0.t
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vamomaxuei8.v v16, (a0), v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> *%0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
+ %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> *%0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
i32 %4)
- ret <vscale x 16 x i32> %a
+ ret <vscale x 8 x i64> %a
}
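
For context, here is a minimal standalone sketch (not part of the diff) of the pattern every test above follows, built from the nxv1i32.nxv1i32 declaration that appears verbatim in this file; the wrapper name @example_vamomaxu is hypothetical. The intrinsic takes a base pointer, an index vector, a value operand, and an AVL, and returns the memory contents from before the atomic unsigned-max, which is why the generated code uses a tail-undisturbed (tu) vsetvli and copies the result register back into v8.

; Minimal sketch, assuming the same +experimental-v,+experimental-zvamo
; attributes as the RUN line at the top of this test.
declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
  <vscale x 1 x i32>*,   ; base address of the scattered elements
  <vscale x 1 x i32>,    ; index vector selecting the memory locations
  <vscale x 1 x i32>,    ; value operand for the unsigned-max
  i32);                  ; AVL (application vector length)

define <vscale x 1 x i32> @example_vamomaxu(<vscale x 1 x i32>* %base, <vscale x 1 x i32> %idx, <vscale x 1 x i32> %val, i32 %avl) nounwind {
entry:
  ; Indexed atomic unsigned-max; yields the original memory values.
  %old = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
    <vscale x 1 x i32>* %base,
    <vscale x 1 x i32> %idx,
    <vscale x 1 x i32> %val,
    i32 %avl)
  ret <vscale x 1 x i32> %old
}

Per the NOTE at the top of the file, the CHECK lines are regenerated with utils/update_llc_test_checks.py rather than written by hand.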