author    Zakk Chen <zakk.chen@sifive.com>  2021-04-08 08:28:15 -0700
committer Zakk Chen <zakk.chen@sifive.com>  2021-04-11 19:19:02 -0700
commit    a8fc0e445cad91a5c2f207e39ee9220253eb5578 (patch)
tree      c4ced6dd6a0abdc1d714828a21a640e6a03f975a
parent    [RISCV][Clang] Add more RVV load/store intrinsic functions. (diff)
[RISCV][Clang] Add all RVV Mask intrinsic functions.

1. Redefine the vpopc and vfirst IR intrinsics so they can adapt to the
   clang TableGen generator, which always appends a type for vl in the
   IntrinsicTypes of clang codegen.
2. Remove the `c` type transformer and add `u` and `l` for the unsigned
   long and long types.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen@sifive.com>
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D100120
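
For illustration, a minimal C sketch of the scalar result types these
changes expose (not part of the patch; it mirrors the autogenerated tests
below and assumes the overloaded spellings from riscv_vector.h):

#include <riscv_vector.h>

// With the new `u`/`l` transformers, vpopc yields unsigned long and
// vfirst yields long (vfirst returns -1 when no mask bit is set).
long first_active(vbool8_t mask, vbool8_t op1, size_t vl) {
  unsigned long count = vpopc(op1, vl); // unmasked population count
  if (count == 0)
    return -1;
  return vfirst(mask, op1, vl);         // masked find-first-set
}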
-rw-r--r--clang/include/clang/Basic/riscv_vector.td66
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c190
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c334
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c338
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c204
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c106
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c106
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c204
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c197
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c197
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c197
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c106
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c106
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c204
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c196
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c598
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c646
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c204
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c92
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c106
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c106
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c204
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c211
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c92
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c211
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c211
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c106
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c106
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c204
-rw-r--r--clang/utils/TableGen/RISCVVEmitter.cpp25
-rw-r--r--llvm/include/llvm/IR/IntrinsicsRISCV.td8
31 files changed, 5871 insertions, 10 deletions
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index b868fc2d74cf..12440660abba 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -66,7 +66,8 @@
// 0: void type, ignores "t"
// z: size_t, ignores "t"
// t: ptrdiff_t, ignores "t"
-// c: uint8_t, ignores "t"
+// u: unsigned long, ignores "t"
+// l: long, ignores "t"
//
// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
@@ -241,6 +242,10 @@ multiclass RVVOutOp0Op1BuiltinSet<string intrinsic_name, string type_range,
: RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes,
[-1, 0, 1]>;
+multiclass RVVOutBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1]>;
+
// IntrinsicTypes is output, op1 [-1, 1]
multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
list<list<string>> suffixes_prototypes>
@@ -371,6 +376,27 @@ multiclass RVVFloatingMaskOutVFBuiltinSet
: RVVOp0Op1BuiltinSet<NAME, "fd",
[["vf", "vm", "mve"]]>;
+class RVVMaskBinBuiltin : RVVOutBuiltin<"m", "mmm", "c"> {
+ let Name = NAME # "_mm";
+ let HasMask = false;
+}
+
+class RVVMaskUnaryBuiltin : RVVOutBuiltin<"m", "mm", "c"> {
+ let Name = NAME # "_m";
+}
+
+class RVVMaskNullaryBuiltin : RVVOutBuiltin<"m", "m", "c"> {
+ let Name = NAME # "_m";
+ let HasMask = false;
+ let HasNoMaskedOverloaded = false;
+}
+
+class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
+ let Name = NAME # "_m";
+ let HasMaskedOffOperand = false;
+}
+
+
class RVVFloatingUnaryBuiltin<string builtin_suffix, string ir_suffix,
string prototype>
: RVVOutBuiltin<ir_suffix, prototype, "fd"> {
@@ -1114,3 +1140,41 @@ defm vfredosum : RVVFloatingReductionBuiltin;
// 15.4. Vector Widening Floating-Point Reduction Instructions
defm vfwredsum : RVVFloatingWidenReductionBuiltin;
defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+
+// 16. Vector Mask Instructions
+// 16.1. Vector Mask-Register Logical Instructions
+def vmand : RVVMaskBinBuiltin;
+def vmnand : RVVMaskBinBuiltin;
+def vmandnot : RVVMaskBinBuiltin;
+def vmxor : RVVMaskBinBuiltin;
+def vmor : RVVMaskBinBuiltin;
+def vmnor : RVVMaskBinBuiltin;
+def vmornot : RVVMaskBinBuiltin;
+def vmxnor : RVVMaskBinBuiltin;
+// pseudoinstructions
+def vmclr : RVVMaskNullaryBuiltin;
+def vmset : RVVMaskNullaryBuiltin;
+
+// 16.2. Vector mask population count vpopc
+def vpopc : RVVMaskOp0Builtin<"um">;
+
+// 16.3. vfirst find-first-set mask bit
+def vfirst : RVVMaskOp0Builtin<"lm">;
+
+// 16.4. vmsbf.m set-before-first mask bit
+def vmsbf : RVVMaskUnaryBuiltin;
+
+// 16.5. vmsif.m set-including-first mask bit
+def vmsif : RVVMaskUnaryBuiltin;
+
+// 16.6. vmsof.m set-only-first mask bit
+def vmsof : RVVMaskUnaryBuiltin;
+
+let HasNoMaskedOverloaded = false in {
+ // 16.8. Vector Iota Instruction
+ defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
+
+ // 16.9. Vector Element Index Instruction
+ defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
+ ["v", "Uv", "Uv"]]>;
+}
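
As a rough sketch of the builtin naming these classes produce (illustrative
only, not part of the patch; the _b8 spellings below match the
non-overloaded tests added in this commit):

#include <riscv_vector.h>

// RVVMaskBinBuiltin sets Name = NAME # "_mm" and RVVMaskUnaryBuiltin sets
// Name = NAME # "_m"; the full spelling appends the mask type suffix.
vbool8_t mask_demo(vbool8_t a, vbool8_t b, size_t vl) {
  vbool8_t conj = vmand_mm_b8(a, b, vl); // 16.1 mask-register logical
  return vmsbf_m_b8(conj, vl);           // 16.4 set-before-first
}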
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
new file mode 100644
index 000000000000..4cfa5df5e08f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
@@ -0,0 +1,190 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b16(vbool16_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b32(vbool32_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b64(vbool64_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
new file mode 100644
index 000000000000..043924d35c1e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -0,0 +1,334 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
new file mode 100644
index 000000000000..653c8ac4dbe5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
@@ -0,0 +1,338 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vbool2_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vbool1_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vbool2_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
new file mode 100644
index 000000000000..1d762565fa7b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
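
For context, a minimal caller for the overloaded forms exercised above might look like the following. This is a sketch, not part of the patch, and it assumes vmandnot(op1, op2, vl) computes op1 & ~op2, matching the vmandnot.mm instruction semantics; the function names are illustrative only.

#include <riscv_vector.h>

// Keep only the lanes that are set in `a` and clear in `b`.
vbool8_t mask_diff_b8(vbool8_t a, vbool8_t b, size_t vl) {
  return vmandnot(a, b, vl); // a & ~b (assumed operand order)
}

// Intersect two predicate masks for a subsequent masked operation.
vbool8_t mask_both_b8(vbool8_t a, vbool8_t b, size_t vl) {
  return vmand(a, b, vl); // a & b
}
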
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c
new file mode 100644
index 000000000000..7e0792efcc67
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
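
Since NAND(m, m) == ~m, the binary vmnand tested above also provides the unary mask complement; the V spec defines the vmnot.m assembler pseudo as exactly this expansion. A sketch, not part of the patch:

#include <riscv_vector.h>

// Complement a mask using the NAND-with-self idiom.
vbool32_t mask_not_b32(vbool32_t m, size_t vl) {
  return vmnand(m, m, vl); // ~(m & m) == ~m
}
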
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c
new file mode 100644
index 000000000000..7eed0c6caed1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
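
By De Morgan, vmnor(a, b, vl) == ~a & ~b, so the "neither condition holds" mask needs only one mask operation rather than two complements and an AND. A sketch, not part of the patch:

#include <riscv_vector.h>

// True in every lane where neither `a` nor `b` is set.
vbool16_t mask_neither_b16(vbool16_t a, vbool16_t b, size_t vl) {
  return vmnor(a, b, vl); // ~(a | b) == ~a & ~b
}
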
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c
new file mode 100644
index 000000000000..9883130869a2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
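
Assuming vmornot(op1, op2, vl) computes op1 | ~op2 (the vmornot.mm semantics), lane-wise logical implication b -> a is a single operation. A sketch, not part of the patch; the function name is illustrative only:

#include <riscv_vector.h>

// True wherever `b` implies `a`, i.e. ~b | a.
vbool4_t mask_implies_b4(vbool4_t a, vbool4_t b, size_t vl) {
  return vmornot(a, b, vl); // a | ~b (assumed operand order)
}
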
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
new file mode 100644
index 000000000000..03be5a611345
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
@@ -0,0 +1,197 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
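
vmsbf produces a mask of all lanes strictly before the first set lane of its operand, which selects the prefix that precedes a sentinel lane. The masked overload follows the (mask, maskedoff, op1, vl) argument order used in the tests above. A sketch, not part of the patch:

#include <riscv_vector.h>

// Lanes strictly before the first "stop" lane.
vbool8_t prefix_before_stop_b8(vbool8_t stop, size_t vl) {
  return vmsbf(stop, vl);
}

// The same, computed only under `mask`; inactive lanes come from `maskedoff`.
vbool8_t prefix_before_stop_m_b8(vbool8_t mask, vbool8_t maskedoff,
                                 vbool8_t stop, size_t vl) {
  return vmsbf(mask, maskedoff, stop, vl);
}
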
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
new file mode 100644
index 000000000000..0e6b39d63b1b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -0,0 +1,197 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
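
vmsif is the inclusive counterpart of vmsbf: its result covers every lane up to and including the first set lane of the operand, so it selects a prefix that ends at the first match rather than just before it. A sketch, not part of the patch:

#include <riscv_vector.h>

// Lanes up to and including the first "stop" lane.
vbool64_t prefix_through_stop_b64(vbool64_t stop, size_t vl) {
  return vmsif(stop, vl);
}
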
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
new file mode 100644
index 000000000000..f816cbceea4a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
@@ -0,0 +1,197 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
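
For reference, a minimal sketch of how the set-only-first operation tested above might be used; it assumes only the overloaded vmsof signature exercised by these tests, and the helper name keep_first_match is hypothetical.

    #include <riscv_vector.h>

    // Hypothetical helper, not part of this patch: keep only the first
    // active lane of a candidate mask. vmsof yields 1 at the first set
    // element of its operand and 0 everywhere else (all zeros when the
    // operand has no set bit).
    vbool8_t keep_first_match(vbool8_t candidates, size_t vl) {
      return vmsof(candidates, vl);
    }
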
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c
new file mode 100644
index 000000000000..ca5799c6f6f7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
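
A minimal sketch of mask equivalence built on the overloaded vmxnor tested above: a lane comes back set exactly where the two operand masks agree. The helper name mask_agree is hypothetical.

    #include <riscv_vector.h>

    // Hypothetical helper, not part of this patch: result[i] = 1 exactly
    // when op1[i] == op2[i], i.e. the bitwise XNOR of two mask registers.
    vbool4_t mask_agree(vbool4_t op1, vbool4_t op2, size_t vl) {
      return vmxnor(op1, op2, vl);
    }
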
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c
new file mode 100644
index 000000000000..809b4d80cd07
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
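
Similarly, a sketch of the symmetric difference of two predicates using the overloaded vmxor tested above; the helper name mask_differ is hypothetical.

    #include <riscv_vector.h>

    // Hypothetical helper, not part of this patch: result[i] = 1 exactly
    // when op1[i] != op2[i], i.e. the bitwise XOR of two mask registers.
    vbool4_t mask_differ(vbool4_t op1, vbool4_t op2, size_t vl) {
      return vmxor(op1, op2, vl);
    }
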
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c
new file mode 100644
index 000000000000..52f58beab4c3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
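
A sketch of the population-count intrinsic tested above: vpopc returns the number of set mask bits as an unsigned long, matching the test signatures, and the three-argument form restricts the count to lanes where a governing mask is set. The helper name count_active is hypothetical.

    #include <riscv_vector.h>

    // Hypothetical helper, not part of this patch: count the active lanes
    // of op1, considering only positions where mask is set.
    unsigned long count_active(vbool8_t mask, vbool8_t op1, size_t vl) {
      return vpopc(mask, op1, vl);
    }
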
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c
new file mode 100644
index 000000000000..e1201f2c8a19
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c
@@ -0,0 +1,196 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst_m_b1(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst_m_b2(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst_m_b4(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst_m_b8(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b16(vbool16_t op1, size_t vl) {
+ return vfirst_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b32(vbool32_t op1, size_t vl) {
+ return vfirst_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b64(vbool64_t op1, size_t vl) {
+ return vfirst_m_b64(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vfirst_m_b1_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vfirst_m_b2_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vfirst_m_b4_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vfirst_m_b8_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vfirst_m_b16_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vfirst_m_b32_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vfirst_m_b64_m(mask, op1, vl);
+}
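
A sketch of the find-first-set intrinsic tested above: vfirst_m_b8 returns the index of the first set mask bit as a long, or -1 when no bit within vl is set. The helper name first_active_lane is hypothetical.

    #include <riscv_vector.h>

    // Hypothetical helper, not part of this patch: index of the first
    // active lane of m, or -1 if every lane up to vl is clear.
    long first_active_lane(vbool8_t m, size_t vl) {
      return vfirst_m_b8(m, vl);
    }
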
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
new file mode 100644
index 000000000000..314abf18476b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
@@ -0,0 +1,598 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vid_v_u8m1(size_t vl) { return vid_v_u8m1(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vid_v_u64m8(size_t vl) { return vid_v_u64m8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ size_t vl) {
+ return vid_v_u8mf8_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ size_t vl) {
+ return vid_v_u8mf4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ size_t vl) {
+ return vid_v_u8mf2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
+ return vid_v_u8m1_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
+ return vid_v_u8m2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
+ return vid_v_u8m4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
+ return vid_v_u8m8_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ size_t vl) {
+ return vid_v_u16mf4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ size_t vl) {
+ return vid_v_u16mf2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ size_t vl) {
+ return vid_v_u16m1_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ size_t vl) {
+ return vid_v_u16m2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ size_t vl) {
+ return vid_v_u16m4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ size_t vl) {
+ return vid_v_u16m8_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ size_t vl) {
+ return vid_v_u32mf2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ size_t vl) {
+ return vid_v_u32m1_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ size_t vl) {
+ return vid_v_u32m2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ size_t vl) {
+ return vid_v_u32m4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ size_t vl) {
+ return vid_v_u32m8_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ size_t vl) {
+ return vid_v_u64m1_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ size_t vl) {
+ return vid_v_u64m2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ size_t vl) {
+ return vid_v_u64m4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ size_t vl) {
+ return vid_v_u64m8_m(mask, maskedoff, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
new file mode 100644
index 000000000000..dd7c7cd17b50
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
@@ -0,0 +1,646 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
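+
+// viota.m computes an exclusive prefix sum over the source mask: result
+// element i holds the count of set mask bits among elements 0 .. i-1. The
+// _m variants further take a mask and a maskedoff vector for inactive
+// elements.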
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
+ return viota_m_u8mf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
+ return viota_m_u8mf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
+ return viota_m_u8mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
+ return viota_m_u8m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
+ return viota_m_u8m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
+ return viota_m_u8m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
+ return viota_m_u8m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
+ return viota_m_u16mf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
+ return viota_m_u16mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
+ return viota_m_u16m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
+ return viota_m_u16m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
+ return viota_m_u16m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
+ return viota_m_u16m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
+ return viota_m_u32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
+ return viota_m_u32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
+ return viota_m_u32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
+ return viota_m_u32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
+ return viota_m_u32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
+ return viota_m_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
+ return viota_m_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
+ return viota_m_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) {
+ return viota_m_u64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota_m_u8mf8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota_m_u8mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota_m_u8mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota_m_u8m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota_m_u8m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vbool2_t op1, size_t vl) {
+ return viota_m_u8m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vbool1_t op1, size_t vl) {
+ return viota_m_u8m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota_m_u16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota_m_u16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota_m_u16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota_m_u16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota_m_u16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vbool2_t op1, size_t vl) {
+ return viota_m_u16m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota_m_u32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota_m_u32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota_m_u32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota_m_u32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota_m_u32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota_m_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota_m_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota_m_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota_m_u64m8_m(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c
new file mode 100644
index 000000000000..f89808de7ff8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
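+
+// vmand.mm ANDs two mask registers bit-by-bit over the first vl elements;
+// vmandnot.mm ANDs the first operand with the complement of the second.
+// Mask-logical instructions have no masked forms, so there are no _m
+// variants in this file.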
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmand_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmand_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmand_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmand_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmand_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmand_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmand_mm_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmandnot_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmandnot_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmandnot_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmandnot_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmandnot_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmandnot_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmandnot_mm_b64(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c
new file mode 100644
index 000000000000..70424925ba02
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c
@@ -0,0 +1,92 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
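+
+// vmclr.m clears (zeroes) the first vl bits of a mask register, so the
+// intrinsic takes only a vl operand and returns an all-clear mask.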
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmclr_m_b1(size_t vl) { return vmclr_m_b1(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmclr_m_b2(size_t vl) { return vmclr_m_b2(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmclr_m_b4(size_t vl) { return vmclr_m_b4(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmclr_m_b8(size_t vl) { return vmclr_m_b8(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmclr_m_b16(size_t vl) { return vmclr_m_b16(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmclr_m_b32(size_t vl) { return vmclr_m_b32(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmclr_m_b64(size_t vl) { return vmclr_m_b64(vl); }
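
vmclr_m_bN takes only vl and produces an all-zeros mask; the spec defines the vmclr.m pseudo-instruction as vmxor.mm vd, vd, vd, and the same identity holds at the intrinsic level, since m ^ m == 0 for any m. A sketch of that equivalence, assuming the vmxor_mm_b8 intrinsic added elsewhere in this patch:

#include <riscv_vector.h>

// Both calls yield a mask whose first vl elements are all 0.
vbool8_t zeros_direct(size_t vl) { return vmclr_m_b8(vl); }
vbool8_t zeros_via_xor(vbool8_t m, size_t vl) { return vmxor_mm_b8(m, m, vl); }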
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c
new file mode 100644
index 000000000000..c7fd8f589f92
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmnand_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmnand_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmnand_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmnand_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmnand_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmnand_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmnand_mm_b64(op1, op2, vl);
+}
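
vmnand_mm computes ~(op1 & op2); with both operands equal it reduces to plain complement, which is exactly how the spec spells the vmnot.m pseudo-instruction (there is no separate vmnot intrinsic in this patch). A short sketch of that idiom:

#include <riscv_vector.h>

// Elementwise mask complement over the first vl elements:
// vmnand(m, m) == ~(m & m) == ~m.
vbool8_t mask_not(vbool8_t m, size_t vl) {
  return vmnand_mm_b8(m, m, vl);
}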
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c
new file mode 100644
index 000000000000..2641e55e04c6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmnor_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmnor_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmnor_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmnor_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmnor_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmnor_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmnor_mm_b64(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c
new file mode 100644
index 000000000000..c6c6e7a5ba93
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmor_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmor_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmor_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmor_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmor_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmor_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmor_mm_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmornot_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmornot_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmornot_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmornot_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmornot_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmornot_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmornot_mm_b64(op1, op2, vl);
+}
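
vmor.c likewise pairs two operations: vmor_mm is the plain union, while vmornot_mm(op1, op2, vl) computes op1 | ~op2, which can be read as elementwise implication (the result is 1 wherever op2 being set forces op1 to be set, and everywhere op2 is clear). A hedged sketch under that reading, not part of the patch:

#include <riscv_vector.h>

// "required implies present" holds exactly where present | ~required is 1.
vbool8_t implication(vbool8_t present, vbool8_t required, size_t vl) {
  return vmornot_mm_b8(present, required, vl);
}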
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
new file mode 100644
index 000000000000..df0afc96309f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
@@ -0,0 +1,211 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) {
+ return vmsbf_m_b1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) {
+ return vmsbf_m_b2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) {
+ return vmsbf_m_b4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) {
+ return vmsbf_m_b8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) {
+ return vmsbf_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) {
+ return vmsbf_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) {
+ return vmsbf_m_b64(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsbf_m_b1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsbf_m_b2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsbf_m_b4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsbf_m_b8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsbf_m_b16_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsbf_m_b32_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsbf_m_b64_m(mask, maskedoff, op1, vl);
+}
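
vmsbf.m is set-before-first: every element strictly before the first set element of the source becomes 1, and everything from that element on becomes 0 (all 1s if nothing is set within vl). For source elements [0,0,1,0,1] the unmasked result is [1,1,0,0,0]. Note also, visible in the IR above, that the masked C wrapper takes (mask, maskedoff, op1, vl) while the underlying intrinsic receives (maskedoff, op1, mask, vl). A small sketch pairing it with vfirst, which this patch also adds; assuming the spec behavior, vfirst returns the index of the first set element or -1, hence its long return type:

#include <riscv_vector.h>

// Enable only the elements strictly before the first match in `found`.
vbool8_t before_first(vbool8_t found, size_t vl) {
  return vmsbf_m_b8(found, vl);
}

// Index of the first set element of `found`, or -1 if none within vl.
long first_index(vbool8_t found, size_t vl) {
  return vfirst_m_b8(found, vl);
}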
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c
new file mode 100644
index 000000000000..bb61dfe7c8fa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c
@@ -0,0 +1,92 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmset_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmset_m_b1(size_t vl) { return vmset_m_b1(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmset_m_b2(size_t vl) { return vmset_m_b2(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmset_m_b4(size_t vl) { return vmset_m_b4(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmset_m_b8(size_t vl) { return vmset_m_b8(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmset_m_b16(size_t vl) { return vmset_m_b16(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmset_m_b32(size_t vl) { return vmset_m_b32(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmset_m_b64(size_t vl) { return vmset_m_b64(vl); }
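
vmset_m_bN is the all-ones counterpart of vmclr_m_bN: the spec spells the vmset.m pseudo-instruction as vmxnor.mm vd, vd, vd, and ~(m ^ m) is all 1s for any m. A sketch of the identity, assuming the vmxnor_mm_b8 intrinsic from the same patch:

#include <riscv_vector.h>

// Both calls yield a mask whose first vl elements are all 1.
vbool8_t ones_direct(size_t vl) { return vmset_m_b8(vl); }
vbool8_t ones_via_xnor(vbool8_t m, size_t vl) { return vmxnor_mm_b8(m, m, vl); }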
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
new file mode 100644
index 000000000000..f3ad1de79f2c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
@@ -0,0 +1,211 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) {
+ return vmsif_m_b1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) {
+ return vmsif_m_b2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) {
+ return vmsif_m_b4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) {
+ return vmsif_m_b8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) {
+ return vmsif_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) {
+ return vmsif_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) {
+ return vmsif_m_b64(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsif_m_b1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsif_m_b2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsif_m_b4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsif_m_b8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsif_m_b16_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsif_m_b32_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsif_m_b64_m(mask, maskedoff, op1, vl);
+}
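
vmsif.m is set-including-first: like vmsbf, but the first set element itself is also 1, so source [0,0,1,0,1] yields [1,1,1,0,0]; vmsof.m (the next file) keeps only that first element, giving [0,0,1,0,0]. Assuming those spec semantics, the three operations are related by a simple identity:

#include <riscv_vector.h>

// sif == sbf | sof: everything up to and including the first set element.
vbool8_t including_first(vbool8_t m, size_t vl) {
  return vmor_mm_b8(vmsbf_m_b8(m, vl), vmsof_m_b8(m, vl), vl);
}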
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
new file mode 100644
index 000000000000..ccc66d48bdf5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
@@ -0,0 +1,211 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) {
+ return vmsof_m_b1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) {
+ return vmsof_m_b2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) {
+ return vmsof_m_b4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) {
+ return vmsof_m_b8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) {
+ return vmsof_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) {
+ return vmsof_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) {
+ return vmsof_m_b64(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsof_m_b1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsof_m_b2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsof_m_b4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsof_m_b8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsof_m_b16_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsof_m_b32_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsof_m_b64_m(mask, maskedoff, op1, vl);
+}
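
For context, vmsof.m ("set-only-first") produces a mask whose single active bit is the first set bit of the source operand, evaluated under the current vl. A minimal usage sketch in C, assuming the header and the vmsof intrinsics exercised above plus a vmseq compare intrinsic named per the usual RVV scheme (the helper and its scenario are hypothetical, not part of this patch):

#include <riscv_vector.h>

// Hypothetical helper: reduce a compare result to just its first match.
// vmseq_vx_u8m1_b8 is assumed from the standard intrinsic naming;
// vmsof_m_b8 is one of the intrinsics tested above.
vbool8_t first_match(vuint8m1_t vec, uint8_t key, size_t vl) {
  vbool8_t eq = vmseq_vx_u8m1_b8(vec, key, vl); // lanes equal to key
  return vmsof_m_b8(eq, vl);                    // keep only the first hit
}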
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c
new file mode 100644
index 000000000000..5f06b751718b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmxnor_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmxnor_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmxnor_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmxnor_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmxnor_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmxnor_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmxnor_mm_b64(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c
new file mode 100644
index 000000000000..9c843877a83f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmxor_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmxor_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmxor_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmxor_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmxor_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmxor_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmxor_mm_b64(op1, op2, vl);
+}
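
The mm-suffixed intrinsics above are plain element-wise logic on two mask operands under vl. A hedged composition sketch using only names tested in this patch (the predicates themselves are hypothetical):

#include <riscv_vector.h>

// Equivalence of two predicates: true exactly where a and b agree.
vbool32_t masks_agree(vbool32_t a, vbool32_t b, size_t vl) {
  return vmxnor_mm_b32(a, b, vl);
}

// The same result via De Morgan: xnor(a, b) == not(xor(a, b)), where the
// NOT is spelled as a vmnand of the value with itself.
vbool32_t masks_agree_alt(vbool32_t a, vbool32_t b, size_t vl) {
  vbool32_t x = vmxor_mm_b32(a, b, vl);
  return vmnand_mm_b32(x, x, vl);
}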
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c
new file mode 100644
index 000000000000..a94aa34d1f9f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
+ return vpopc_m_b1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
+ return vpopc_m_b2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
+ return vpopc_m_b4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
+ return vpopc_m_b8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
+ return vpopc_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
+ return vpopc_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
+ return vpopc_m_b64(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vpopc_m_b1_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vpopc_m_b2_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vpopc_m_b4_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vpopc_m_b8_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vpopc_m_b16_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vpopc_m_b32_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vpopc_m_b64_m(mask, op1, vl);
+}
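
Since vpopc.m counts set bits under both vl and the optional mask, the masked form amounts to a population count over enabled lanes only. A minimal sketch built from intrinsics in the test above; note the unsigned long result, which lowers to i32 on riscv32 and i64 on riscv64 exactly as the CHECK lines show:

#include <riscv_vector.h>

// Count the set bits of op1 among the lanes enabled by mask.
unsigned long count_enabled(vbool8_t mask, vbool8_t op1, size_t vl) {
  return vpopc_m_b8_m(mask, op1, vl);
}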
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index a724ec4cb9db..ed1e0c2b8f59 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -50,6 +50,8 @@ class RVVType {
Void,
Size_t,
Ptrdiff_t,
+ UnsignedLong,
+ SignedLong,
Boolean,
SignedInteger,
UnsignedInteger,
@@ -375,6 +377,12 @@ void RVVType::initBuiltinStr() {
case ScalarTypeKind::Ptrdiff_t:
BuiltinStr = "Y";
return;
+ case ScalarTypeKind::UnsignedLong:
+ BuiltinStr = "ULi";
+ return;
+ case ScalarTypeKind::SignedLong:
+ BuiltinStr = "Li";
+ return;
case ScalarTypeKind::Boolean:
assert(ElementBitwidth == 1);
BuiltinStr += "b";
@@ -481,6 +489,12 @@ void RVVType::initTypeStr() {
case ScalarTypeKind::Ptrdiff_t:
Str = "ptrdiff_t";
return;
+ case ScalarTypeKind::UnsignedLong:
+ Str = "unsigned long";
+ return;
+ case ScalarTypeKind::SignedLong:
+ Str = "long";
+ return;
case ScalarTypeKind::Boolean:
if (isScalar())
Str += "bool";
@@ -610,10 +624,11 @@ void RVVType::applyModifier(StringRef Transformer) {
case 't':
ScalarType = ScalarTypeKind::Ptrdiff_t;
break;
- case 'c': // uint8_t
- ScalarType = ScalarTypeKind::UnsignedInteger;
- ElementBitwidth = 8;
- Scale = 0;
+ case 'u':
+ ScalarType = ScalarTypeKind::UnsignedLong;
+ break;
+ case 'l':
+ ScalarType = ScalarTypeKind::SignedLong;
break;
default:
PrintFatalError("Illegal primitive type transformers!");
@@ -1013,7 +1028,7 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) {
void RVVEmitter::parsePrototypes(StringRef Prototypes,
std::function<void(StringRef)> Handler) {
- const StringRef Primaries("evwqom0ztc");
+ const StringRef Primaries("evwqom0ztul");
while (!Prototypes.empty()) {
size_t Idx = 0;
// Skip over complex prototype because it could contain primitive type
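
With "u" and "l" accepted as primaries, a builtin prototype string can request XLEN-sized scalar results directly. A hedged sketch of how riscv_vector.td could spell the two affected builtins (the RVVMaskOp0Builtin class name is an assumption for illustration; this hunk only defines the letters):

// "um": unsigned long result, one mask operand; vl is appended implicitly.
def vpopc  : RVVMaskOp0Builtin<"um">;
// "lm": long result, so vfirst can return -1 when no mask bit is set.
def vfirst : RVVMaskOp0Builtin<"lm">;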
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index fa005c98f11d..7d342becea25 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -548,15 +548,15 @@ let TargetPrefix = "riscv" in {
// Output: (scalar type)
// Input: (vector_in, vl)
class RISCVMaskUnarySOutNoMask
- : Intrinsic<[llvm_anyint_ty],
- [llvm_anyvector_ty, LLVMMatchType<0>],
+ : Intrinsic<[LLVMMatchType<1>],
+ [llvm_anyvector_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
// For unary operations with scalar type output with mask
// Output: (scalar type)
// Input: (vector_in, mask, vl)
class RISCVMaskUnarySOutMask
- : Intrinsic<[llvm_anyint_ty],
- [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
+ : Intrinsic<[LLVMMatchType<1>],
+ [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
// For destination vector type is NOT the same as source vector.
// Input: (vector_in, vl)
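
With this redefinition the overloaded vector type becomes index 0 and the XLEN integer index 1, so the scalar result follows the vl type and the mangled names come out vector-first, agreeing with the CHECK lines in the tests above, e.g.:

declare i32 @llvm.riscv.vpopc.nxv64i1.i32(<vscale x 64 x i1>, i32)
declare i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1>, i64)
declare i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1>, <vscale x 64 x i1>, i64)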