Diffstat (limited to 'llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll')
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll | 338
1 files changed, 337 insertions, 1 deletions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
index 6cd5f56d0401..d82124ecf86e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
@@ -1,7 +1,343 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
-; RUN:   -mattr=+f -verify-machineinstrs \
+; RUN:   -mattr=+d -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vse.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  i32);
+
+define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    i32 %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  i32);
+
+define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    i32 %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  i32);
+
+define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    i32 %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  i32);
+
+define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    i32 %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  i32);
+
+define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    i32 %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  i32);
+
+define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    i32 %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  i32);
+
+define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    i32 %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  i32);
+
+define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    i32 %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  call void @llvm.riscv.vse.mask.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret void
+}
+
 declare void @llvm.riscv.vse.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,