[Clang][RISCV] Introduce OFP8(E4M3, E5M2) RISC-V vector types (#191349)

Currently there is no OFP8 scalar type supported in either the Clang or the
LLVM type system, so the vector OFP8 RVV types are lowered to i8 LLVM types
for now.
The reason to support these types only in Clang is the ability to define
the intrinsics. If the Clang types were also plain uint8 vector types, it
would be impossible to distinguish the E4M3 type from the E5M2 type, so we
would have to append an additional type suffix to each intrinsic instead.
intrinsic spec update pr:
https://github.com/riscv-non-isa/riscv-rvv-intrinsic-doc/pull/432
vreinterpret intrinsic PR:
https://github.com/llvm/llvm-project/pull/191626

DON'T MERGE: We have to get the intrinsic spec merged first to be able to
make the zvfofp8min change.
This commit is contained in:
Brandon Wu
2026-04-25 22:35:26 +09:00
committed by GitHub
parent 44633491b7
commit 228fabd5be
22 changed files with 1993 additions and 1770 deletions

View File

@@ -1967,7 +1967,7 @@ protected:
unsigned : NumTypeBits;
/// The kind (BuiltinType::Kind) of builtin type this is.
static constexpr unsigned NumOfBuiltinTypeBits = 9;
static constexpr unsigned NumOfBuiltinTypeBits = 10;
unsigned Kind : NumOfBuiltinTypeBits;
};

View File

@@ -73,6 +73,11 @@
RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, false, false, true)
#endif
#ifndef RVV_VECTOR_TYPE_OFP8
#define RVV_VECTOR_TYPE_OFP8(Name, Id, SingletonId, NumEls, E5m2) \
RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, 8, 1, false)
#endif
//===- Vector types -------------------------------------------------------===//
RVV_VECTOR_TYPE_INT("__rvv_int8mf8_t", RvvInt8mf8, RvvInt8mf8Ty, 1, 8, 1, true)
@@ -127,6 +132,36 @@ RVV_VECTOR_TYPE_INT("__rvv_uint64m2_t",RvvUint64m2,RvvUint64m2Ty,2, 64, 1, fals
RVV_VECTOR_TYPE_INT("__rvv_uint64m4_t",RvvUint64m4,RvvUint64m4Ty,4, 64, 1, false)
RVV_VECTOR_TYPE_INT("__rvv_uint64m8_t",RvvUint64m8,RvvUint64m8Ty,8, 64, 1, false)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e4m3mf8_t", RvvFloat8E4M3mf8,
RvvFloat8E4M3mf8Ty, 1, false)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e4m3mf4_t", RvvFloat8E4M3mf4,
RvvFloat8E4M3mf4Ty, 2, false)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e4m3mf2_t", RvvFloat8E4M3mf2,
RvvFloat8E4M3mf2Ty, 4, false)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e4m3m1_t", RvvFloat8E4M3m1,
RvvFloat8E4M3m1Ty, 8, false)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e4m3m2_t", RvvFloat8E4M3m2,
RvvFloat8E4M3m2Ty, 16, false)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e4m3m4_t", RvvFloat8E4M3m4,
RvvFloat8E4M3m4Ty, 32, false)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e4m3m8_t", RvvFloat8E4M3m8,
RvvFloat8E4M3m8Ty, 64, false)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e5m2mf8_t", RvvFloat8E5M2mf8,
RvvFloat8E5M2mf8Ty, 1, true)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e5m2mf4_t", RvvFloat8E5M2mf4,
RvvFloat8E5M2mf4Ty, 2, true)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e5m2mf2_t", RvvFloat8E5M2mf2,
RvvFloat8E5M2mf2Ty, 4, true)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e5m2m1_t", RvvFloat8E5M2m1,
RvvFloat8E5M2m1Ty, 8, true)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e5m2m2_t", RvvFloat8E5M2m2,
RvvFloat8E5M2m2Ty, 16, true)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e5m2m4_t", RvvFloat8E5M2m4,
RvvFloat8E5M2m4Ty, 32, true)
RVV_VECTOR_TYPE_OFP8("__rvv_float8e5m2m8_t", RvvFloat8E5M2m8,
RvvFloat8E5M2m8Ty, 64, true)
RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4_t",RvvFloat16mf4,RvvFloat16mf4Ty,1, 16, 1)
RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2_t",RvvFloat16mf2,RvvFloat16mf2Ty,2, 16, 1)
RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1_t", RvvFloat16m1, RvvFloat16m1Ty, 4, 16, 1)
@@ -508,6 +543,7 @@ RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m2x4_t", RvvBFloat16m2x4, RvvBFloat16m2x4T
RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m4x2_t", RvvBFloat16m4x2, RvvBFloat16m4x2Ty,
16, 16, 2)
#undef RVV_VECTOR_TYPE_OFP8
#undef RVV_VECTOR_TYPE_BFLOAT
#undef RVV_VECTOR_TYPE_FLOAT
#undef RVV_VECTOR_TYPE_INT

View File

@@ -1360,9 +1360,9 @@ def vfwcvtbf16_f_f_v : RVVConvBuiltin<"Fw", "Fwv", "y", "vfwcvtbf16_f">;
let Log2LMUL = [-3, -2, -1, 0, 1, 2],
RequiredFeatures = ["zvfofp8min"],
UnMaskedPolicyScheme = HasPassthruOperand in {
let OverloadedName = "vfwcvt_f_f8e4m3_bf16" in
let OverloadedName = "vfwcvt_f_bf16" in
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "a", [["vw", "wv"]]>;
let OverloadedName = "vfwcvt_f_f8e5m2_bf16",
let OverloadedName = "vfwcvt_f_bf16",
IRName = "vfwcvt_f_f_v_alt",
MaskedIRName = "vfwcvt_f_f_v_alt_mask",
AltFmt = 1 in
@@ -1451,16 +1451,16 @@ let ManualCodegen = [{
// Zvfofp8min
let RequiredFeatures = ["zvfofp8min"] in {
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
let OverloadedName = "vfncvt_f_bf16_f8e4m3" in
let OverloadedName = "vfncvt_f_f8e4m3" in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "a", [["wv", "vwu"]]>;
let OverloadedName = "vfncvt_f_bf16_f8e5m2",
let OverloadedName = "vfncvt_f_f8e5m2",
IRName = "vfncvt_f_f_w_alt",
MaskedIRName = "vfncvt_f_f_w_alt_mask",
AltFmt = 1 in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "b", [["wv", "vwu"]]>;
let OverloadedName = "vfncvt_sat_f_bf16_f8e4m3" in
let OverloadedName = "vfncvt_sat_f_f8e4m3" in
defm : RVVConvBuiltinSet<"vfncvt_sat_f_f_w", "a", [["wv", "vwu"]]>;
let OverloadedName = "vfncvt_sat_f_bf16_f8e5m2",
let OverloadedName = "vfncvt_sat_f_f8e5m2",
IRName = "vfncvt_sat_f_f_w_alt",
MaskedIRName = "vfncvt_sat_f_f_w_alt_mask",
AltFmt = 1 in
@@ -1559,16 +1559,16 @@ let ManualCodegen = [{
// Zvfofp8min
let RequiredFeatures = ["zvfofp8min"] in {
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
let OverloadedName = "vfncvt_f_bf16_f8e4m3" in
let OverloadedName = "vfncvt_f_f8e4m3" in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "a", [["wv", "vw"]]>;
let OverloadedName = "vfncvt_f_bf16_f8e5m2",
let OverloadedName = "vfncvt_f_f8e5m2",
IRName = "vfncvt_f_f_w_alt",
MaskedIRName = "vfncvt_f_f_w_alt_mask",
AltFmt = 1 in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "b", [["wv", "vw"]]>;
let OverloadedName = "vfncvt_sat_f_bf16_f8e4m3" in
let OverloadedName = "vfncvt_sat_f_f8e4m3" in
defm : RVVConvBuiltinSet<"vfncvt_sat_f_f_w", "a", [["wv", "vw"]]>;
let OverloadedName = "vfncvt_sat_f_bf16_f8e5m2",
let OverloadedName = "vfncvt_sat_f_f8e5m2",
IRName = "vfncvt_sat_f_f_w_alt",
MaskedIRName = "vfncvt_sat_f_f_w_alt_mask",
AltFmt = 1 in

View File

@@ -1166,7 +1166,7 @@ enum PredefinedTypeIDs {
///
/// Type IDs for non-predefined types will start at
/// NUM_PREDEF_TYPE_IDs.
const unsigned NUM_PREDEF_TYPE_IDS = 515;
const unsigned NUM_PREDEF_TYPE_IDS = 529;
// Ensure we do not overrun the predefined types we reserved
// in the enum PredefinedTypeIDs above.

View File

@@ -137,9 +137,24 @@ static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
break;
case ScalarTypeKind::FloatE4M3:
case ScalarTypeKind::FloatE5M2:
QT = Context.getIntTypeForBitwidth(8, false);
break;
case ScalarTypeKind::FloatE5M2: {
// TODO: This is workaround code to support only OFP8 RVV types without
// supporting scalar OFP8 types. We need to refactor after scalar types are
// supported.
assert(Type->isVector() && "Only support vector of OFP8 types.");
bool IsE5M2 = Type->getScalarType() == ScalarTypeKind::FloatE5M2;
unsigned Scale = *Type->getScale();
#define RVV_VECTOR_TYPE_OFP8(Name, Id, SingletonId, NumEls, E5m2) \
if (IsE5M2 == E5m2 && Scale == NumEls) \
QT = Context.SingletonId;
#include "clang/Basic/RISCVVTypes.def"
assert(!QT.isNull() && "Unsupported OFP8 vector type");
if (Type->isConstant())
QT = Context.getConstType(QT);
if (Type->isPointer())
QT = Context.getPointerType(QT);
return QT;
}
case ScalarTypeKind::BFloat:
QT = Context.BFloat16Ty;
break;
@@ -1513,11 +1528,23 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
const llvm::StringMap<bool> &FeatureMap) {
const BuiltinType *BT = Ty->castAs<BuiltinType>();
ASTContext::BuiltinVectorTypeInfo Info =
SemaRef.Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
SemaRef.Context.getBuiltinVectorTypeInfo(BT);
unsigned EltSize = SemaRef.Context.getTypeSize(Info.ElementType);
unsigned MinElts = Info.EC.getKnownMinValue();
auto IsOFP8Type = [](const BuiltinType *BT) {
switch (BT->getKind()) {
#define RVV_VECTOR_TYPE_OFP8(Name, Id, SingletonId, NumEls, E5m2) \
case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
return true;
default:
return false;
}
};
if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
!FeatureMap.lookup("zve64d"))
Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve64d";
@@ -1554,6 +1581,8 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
// if we don't have at least zve32x supported, then we need to emit error.
else if (!FeatureMap.lookup("zve32x"))
Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zve32x";
else if (IsOFP8Type(BT) && !FeatureMap.lookup("experimental-zvfofp8min"))
Diag(Loc, diag::err_riscv_type_requires_extension) << Ty << "zvfofp8min";
}
/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the

View File

@@ -250,10 +250,14 @@ void RVVType::initClangBuiltinStr() {
ClangBuiltinStr += "int";
break;
case ScalarTypeKind::UnsignedInteger:
case ScalarTypeKind::FloatE4M3:
case ScalarTypeKind::FloatE5M2:
ClangBuiltinStr += "uint";
break;
case ScalarTypeKind::FloatE4M3:
ClangBuiltinStr += "float8e4m3" + LMUL.str() + "_t";
return;
case ScalarTypeKind::FloatE5M2:
ClangBuiltinStr += "float8e5m2" + LMUL.str() + "_t";
return;
default:
llvm_unreachable("ScalarTypeKind is invalid");
}
@@ -327,10 +331,14 @@ void RVVType::initTypeStr() {
Str += getTypeString("int");
break;
case ScalarTypeKind::UnsignedInteger:
case ScalarTypeKind::FloatE4M3:
case ScalarTypeKind::FloatE5M2:
Str += getTypeString("uint");
break;
case ScalarTypeKind::FloatE4M3:
Str += "vfloat8e4m3" + LMUL.str() + "_t";
break;
case ScalarTypeKind::FloatE5M2:
Str += "vfloat8e5m2" + LMUL.str() + "_t";
break;
default:
llvm_unreachable("ScalarType is invalid!");
}

View File

@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf8(vs2, vl);
}
@@ -23,7 +23,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf8(vs2, vl);
}
@@ -33,7 +33,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf4(vs2, vl);
}
@@ -43,7 +43,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf4(vs2, vl);
}
@@ -53,7 +53,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf2(vs2, vl);
}
@@ -63,7 +63,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf2(vs2, vl);
}
@@ -73,7 +73,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3m1(vs2, vl);
}
@@ -83,7 +83,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_sat_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3m1(vs2, vl);
}
@@ -93,7 +93,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3m2(vs2, vl);
}
@@ -103,7 +103,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_sat_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3m2(vs2, vl);
}
@@ -113,7 +113,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e4m3mf8_t test_vfncvt_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf8_m(vm, vs2, vl);
}
@@ -124,7 +124,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf8_m(vm, vs2, vl);
}
@@ -135,7 +135,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e4m3mf4_t test_vfncvt_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf4_m(vm, vs2, vl);
}
@@ -146,7 +146,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf4_m(vm, vs2, vl);
}
@@ -157,7 +157,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e4m3mf2_t test_vfncvt_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf2_m(vm, vs2, vl);
}
@@ -168,7 +168,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf2_m(vm, vs2, vl);
}
@@ -179,7 +179,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e4m3m1_t test_vfncvt_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3m1_m(vm, vs2, vl);
}
@@ -190,7 +190,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e4m3m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3m1_m(vm, vs2, vl);
}
@@ -201,7 +201,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e4m3m2_t test_vfncvt_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3m2_m(vm, vs2, vl);
}
@@ -212,7 +212,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e4m3m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3m2_m(vm, vs2, vl);
}
@@ -223,7 +223,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf8_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -233,7 +233,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf8_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -243,7 +243,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -253,7 +253,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -263,7 +263,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -273,7 +273,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -283,7 +283,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3m1_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -293,7 +293,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3m1_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -303,7 +303,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3m2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -313,7 +313,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3m2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -323,7 +323,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e4m3mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -334,7 +334,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm,
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm,
vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -345,7 +345,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e4m3mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -356,7 +356,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -367,7 +367,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e4m3mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -378,7 +378,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -389,7 +389,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e4m3m1_t test_vfncvt_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -400,7 +400,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e4m3m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -411,7 +411,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e4m3m2_t test_vfncvt_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e4m3m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -422,7 +422,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e4m3m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e4m3m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -433,7 +433,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf8(vs2, vl);
}
@@ -443,7 +443,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf8(vs2, vl);
}
@@ -453,7 +453,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf4(vs2, vl);
}
@@ -463,7 +463,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf4(vs2, vl);
}
@@ -473,7 +473,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf2(vs2, vl);
}
@@ -483,7 +483,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf2(vs2, vl);
}
@@ -493,7 +493,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2m1(vs2, vl);
}
@@ -503,7 +503,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_sat_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2m1(vs2, vl);
}
@@ -513,7 +513,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2m2(vs2, vl);
}
@@ -523,7 +523,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_sat_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2m2(vs2, vl);
}
@@ -533,7 +533,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e5m2mf8_t test_vfncvt_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf8_m(vm, vs2, vl);
}
@@ -544,7 +544,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf8_m(vm, vs2, vl);
}
@@ -555,7 +555,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e5m2mf4_t test_vfncvt_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf4_m(vm, vs2, vl);
}
@@ -566,7 +566,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf4_m(vm, vs2, vl);
}
@@ -577,7 +577,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e5m2mf2_t test_vfncvt_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf2_m(vm, vs2, vl);
}
@@ -588,7 +588,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf2_m(vm, vs2, vl);
}
@@ -599,7 +599,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e5m2m1_t test_vfncvt_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2m1_m(vm, vs2, vl);
}
@@ -610,7 +610,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e5m2m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2m1_m(vm, vs2, vl);
}
@@ -621,7 +621,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e5m2m2_t test_vfncvt_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2m2_m(vm, vs2, vl);
}
@@ -632,7 +632,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e5m2m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2m2_m(vm, vs2, vl);
}
@@ -643,7 +643,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf8_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -653,7 +653,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf8_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -663,7 +663,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -673,7 +673,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -683,7 +683,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -693,7 +693,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -703,7 +703,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2m1_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -713,7 +713,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2m1_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -723,7 +723,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2m2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -733,7 +733,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2m2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -743,7 +743,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e5m2mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -754,7 +754,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm,
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm,
vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -765,7 +765,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e5m2mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -776,7 +776,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -787,7 +787,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e5m2mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -798,7 +798,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -809,7 +809,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e5m2m1_t test_vfncvt_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -820,7 +820,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e5m2m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -831,7 +831,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e5m2m2_t test_vfncvt_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_q_f8e5m2m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -842,7 +842,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e5m2m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_q_f8e5m2m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
}

View File

@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8(vbfloat16mf4_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8(vbfloat16mf4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8(vs2, vl);
}
@@ -23,7 +23,7 @@ vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8(vbfloat16mf4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8(vbfloat16mf4_t vs2,
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8(vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8(vs2, vl);
}
@@ -34,7 +34,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8(vbfloat16mf4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4(vbfloat16mf2_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4(vbfloat16mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4(vs2, vl);
}
@@ -44,7 +44,7 @@ vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4(vbfloat16mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4(vbfloat16mf2_t vs2,
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4(vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4(vs2, vl);
}
@@ -55,7 +55,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4(vbfloat16mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2(vbfloat16m1_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2(vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2(vs2, vl);
}
@@ -65,7 +65,7 @@ vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2(vbfloat16m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2(vbfloat16m1_t vs2,
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2(vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2(vs2, vl);
}
@@ -76,7 +76,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2(vbfloat16m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1(vbfloat16m2_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1(vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1(vs2, vl);
}
@@ -86,7 +86,7 @@ vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1(vbfloat16m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1(vbfloat16m2_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1(vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1(vs2, vl);
}
@@ -96,7 +96,7 @@ vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1(vbfloat16m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2(vbfloat16m4_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2(vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2(vs2, vl);
}
@@ -106,7 +106,7 @@ vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2(vbfloat16m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2(vbfloat16m4_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2(vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2(vs2, vl);
}
@@ -116,7 +116,7 @@ vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2(vbfloat16m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4(vbfloat16m8_t vs2, size_t vl) {
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4(vbfloat16m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4(vs2, vl);
}
@@ -126,7 +126,7 @@ vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4(vbfloat16m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4(vbfloat16m8_t vs2, size_t vl) {
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4(vbfloat16m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4(vs2, vl);
}
@@ -136,7 +136,7 @@ vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4(vbfloat16m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_m(vbool64_t vm,
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_m(vbool64_t vm,
vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_m(vm, vs2, vl);
@@ -148,7 +148,7 @@ vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_m(vbool64_t vm,
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_m(vbool64_t vm,
vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_m(vm, vs2, vl);
@@ -160,7 +160,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_m(vbool32_t vm,
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_m(vbool32_t vm,
vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_m(vm, vs2, vl);
@@ -172,7 +172,7 @@ vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_m(vbool32_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_m(vbool32_t vm,
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_m(vbool32_t vm,
vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_m(vm, vs2, vl);
@@ -184,7 +184,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_m(vbool32_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_m(vbool16_t vm,
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_m(vbool16_t vm,
vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_m(vm, vs2, vl);
}
@@ -195,7 +195,7 @@ vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_m(vbool16_t vm,
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_m(vbool16_t vm,
vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_m(vm, vs2, vl);
@@ -207,7 +207,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_m(vbool8_t vm, vbfloat16m2_t vs2,
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_m(vbool8_t vm, vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_m(vm, vs2, vl);
}
@@ -218,7 +218,7 @@ vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_m(vbool8_t vm, vbfloat16m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_m(vbool8_t vm,
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_m(vbool8_t vm,
vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_m(vm, vs2, vl);
@@ -230,7 +230,7 @@ vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_m(vbool8_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_m(vbool4_t vm, vbfloat16m4_t vs2,
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_m(vbool4_t vm, vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_m(vm, vs2, vl);
}
@@ -241,7 +241,7 @@ vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_m(vbool4_t vm, vbfloat16m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_m(vbool4_t vm,
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_m(vbool4_t vm,
vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_m(vm, vs2, vl);
@@ -253,7 +253,7 @@ vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_m(vbool4_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_m(vbool2_t vm, vbfloat16m8_t vs2,
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_m(vbool2_t vm, vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_m(vm, vs2, vl);
}
@@ -264,7 +264,7 @@ vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_m(vbool2_t vm, vbfloat16m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_m(vbool2_t vm,
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_m(vbool2_t vm,
vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_m(vm, vs2, vl);
@@ -276,7 +276,7 @@ vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_m(vbool2_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm(vbfloat16mf4_t vs2,
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm(vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -287,7 +287,7 @@ vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm(vbfloat16mf4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm(vbfloat16mf4_t vs2,
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm(vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm(vs2, __RISCV_FRM_RNE,
vl);
@@ -299,7 +299,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm(vbfloat16mf4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm(vbfloat16mf2_t vs2,
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm(vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -310,7 +310,7 @@ vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm(vbfloat16mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm(vbfloat16mf2_t vs2,
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm(vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm(vs2, __RISCV_FRM_RNE,
vl);
@@ -322,7 +322,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm(vbfloat16mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm(vbfloat16m1_t vs2,
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm(vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -333,7 +333,7 @@ vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm(vbfloat16m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm(vbfloat16m1_t vs2,
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm(vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -344,7 +344,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm(vbfloat16m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm(vbfloat16m2_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm(vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -354,7 +354,7 @@ vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm(vbfloat16m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm(vbfloat16m2_t vs2,
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm(vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -365,7 +365,7 @@ vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm(vbfloat16m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm(vbfloat16m4_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm(vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -375,7 +375,7 @@ vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm(vbfloat16m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm(vbfloat16m4_t vs2,
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm(vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -386,7 +386,7 @@ vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm(vbfloat16m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm(vbfloat16m8_t vs2, size_t vl) {
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm(vbfloat16m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -396,7 +396,7 @@ vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm(vbfloat16m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm(vbfloat16m8_t vs2,
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm(vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -407,7 +407,7 @@ vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm(vbfloat16m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_m(vbool64_t vm,
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_m(vbool64_t vm,
vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -420,7 +420,7 @@ vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_m(vbool64_t vm,
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_m(vbool64_t vm,
vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_m(vm, vs2,
@@ -433,7 +433,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_m(vbool32_t vm,
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_m(vbool32_t vm,
vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -446,7 +446,7 @@ vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_m(vbool32_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_m(vbool32_t vm,
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_m(vbool32_t vm,
vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_m(vm, vs2,
@@ -459,7 +459,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_m(vbool32_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_m(vbool16_t vm,
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_m(vbool16_t vm,
vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -472,7 +472,7 @@ vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_m(vbool16_t vm,
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_m(vbool16_t vm,
vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_m(vm, vs2,
@@ -485,7 +485,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_m(vbool8_t vm,
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_m(vbool8_t vm,
vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -498,7 +498,7 @@ vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_m(vbool8_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_m(vbool8_t vm,
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_m(vbool8_t vm,
vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -511,7 +511,7 @@ vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_m(vbool8_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_m(vbool4_t vm,
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_m(vbool4_t vm,
vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -524,7 +524,7 @@ vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_m(vbool4_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_m(vbool4_t vm,
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_m(vbool4_t vm,
vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -537,7 +537,7 @@ vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_m(vbool4_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_m(vbool2_t vm,
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_m(vbool2_t vm,
vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -550,7 +550,7 @@ vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_m(vbool2_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_m(vbool2_t vm,
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_m(vbool2_t vm,
vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -563,7 +563,7 @@ vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_m(vbool2_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8(vbfloat16mf4_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8(vbfloat16mf4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8(vs2, vl);
}
@@ -573,7 +573,7 @@ vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8(vbfloat16mf4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8(vbfloat16mf4_t vs2,
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8(vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8(vs2, vl);
}
@@ -584,7 +584,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8(vbfloat16mf4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4(vbfloat16mf2_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4(vbfloat16mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4(vs2, vl);
}
@@ -594,7 +594,7 @@ vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4(vbfloat16mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4(vbfloat16mf2_t vs2,
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4(vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4(vs2, vl);
}
@@ -605,7 +605,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4(vbfloat16mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2(vbfloat16m1_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2(vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2(vs2, vl);
}
@@ -615,7 +615,7 @@ vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2(vbfloat16m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2(vbfloat16m1_t vs2,
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2(vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2(vs2, vl);
}
@@ -626,7 +626,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2(vbfloat16m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1(vbfloat16m2_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1(vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1(vs2, vl);
}
@@ -636,7 +636,7 @@ vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1(vbfloat16m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1(vbfloat16m2_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1(vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1(vs2, vl);
}
@@ -646,7 +646,7 @@ vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1(vbfloat16m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2(vbfloat16m4_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2(vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2(vs2, vl);
}
@@ -656,7 +656,7 @@ vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2(vbfloat16m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2(vbfloat16m4_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2(vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2(vs2, vl);
}
@@ -666,7 +666,7 @@ vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2(vbfloat16m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4(vbfloat16m8_t vs2, size_t vl) {
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4(vbfloat16m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4(vs2, vl);
}
@@ -676,7 +676,7 @@ vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4(vbfloat16m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4(vbfloat16m8_t vs2, size_t vl) {
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4(vbfloat16m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4(vs2, vl);
}
@@ -686,7 +686,7 @@ vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4(vbfloat16m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_m(vbool64_t vm,
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_m(vbool64_t vm,
vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_m(vm, vs2, vl);
@@ -698,7 +698,7 @@ vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_m(vbool64_t vm,
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_m(vbool64_t vm,
vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_m(vm, vs2, vl);
@@ -710,7 +710,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_m(vbool32_t vm,
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_m(vbool32_t vm,
vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_m(vm, vs2, vl);
@@ -722,7 +722,7 @@ vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_m(vbool32_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_m(vbool32_t vm,
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_m(vbool32_t vm,
vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_m(vm, vs2, vl);
@@ -734,7 +734,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_m(vbool32_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_m(vbool16_t vm,
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_m(vbool16_t vm,
vbfloat16m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_m(vm, vs2, vl);
}
@@ -745,7 +745,7 @@ vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_m(vbool16_t vm,
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_m(vbool16_t vm,
vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_m(vm, vs2, vl);
@@ -757,7 +757,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_m(vbool8_t vm, vbfloat16m2_t vs2,
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_m(vbool8_t vm, vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_m(vm, vs2, vl);
}
@@ -768,7 +768,7 @@ vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_m(vbool8_t vm, vbfloat16m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_m(vbool8_t vm,
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_m(vbool8_t vm,
vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_m(vm, vs2, vl);
@@ -780,7 +780,7 @@ vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_m(vbool8_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_m(vbool4_t vm, vbfloat16m4_t vs2,
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_m(vbool4_t vm, vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_m(vm, vs2, vl);
}
@@ -791,7 +791,7 @@ vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_m(vbool4_t vm, vbfloat16m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_m(vbool4_t vm,
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_m(vbool4_t vm,
vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_m(vm, vs2, vl);
@@ -803,7 +803,7 @@ vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_m(vbool4_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_m(vbool2_t vm, vbfloat16m8_t vs2,
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_m(vbool2_t vm, vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_m(vm, vs2, vl);
}
@@ -814,7 +814,7 @@ vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_m(vbool2_t vm, vbfloat16m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_m(vbool2_t vm,
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_m(vbool2_t vm,
vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_m(vm, vs2, vl);
@@ -826,7 +826,7 @@ vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_m(vbool2_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm(vbfloat16mf4_t vs2,
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm(vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -837,7 +837,7 @@ vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm(vbfloat16mf4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm(vbfloat16mf4_t vs2,
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm(vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm(vs2, __RISCV_FRM_RNE,
vl);
@@ -849,7 +849,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm(vbfloat16mf4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm(vbfloat16mf2_t vs2,
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm(vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -860,7 +860,7 @@ vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm(vbfloat16mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm(vbfloat16mf2_t vs2,
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm(vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm(vs2, __RISCV_FRM_RNE,
vl);
@@ -872,7 +872,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm(vbfloat16mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm(vbfloat16m1_t vs2,
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm(vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -883,7 +883,7 @@ vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm(vbfloat16m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm(vbfloat16m1_t vs2,
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm(vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -894,7 +894,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm(vbfloat16m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm(vbfloat16m2_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm(vbfloat16m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -904,7 +904,7 @@ vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm(vbfloat16m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm(vbfloat16m2_t vs2,
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm(vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -915,7 +915,7 @@ vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm(vbfloat16m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm(vbfloat16m4_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm(vbfloat16m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -925,7 +925,7 @@ vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm(vbfloat16m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm(vbfloat16m4_t vs2,
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm(vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -936,7 +936,7 @@ vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm(vbfloat16m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm(vbfloat16m8_t vs2, size_t vl) {
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm(vbfloat16m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -946,7 +946,7 @@ vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm(vbfloat16m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm(vbfloat16m8_t vs2,
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm(vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm(vs2, __RISCV_FRM_RNE, vl);
}
@@ -957,7 +957,7 @@ vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm(vbfloat16m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_m(vbool64_t vm,
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_m(vbool64_t vm,
vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -970,7 +970,7 @@ vuint8mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_m(vbool64_t vm,
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_m(vbool64_t vm,
vbfloat16mf4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_m(vm, vs2,
@@ -983,7 +983,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_m(vbool32_t vm,
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_m(vbool32_t vm,
vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -996,7 +996,7 @@ vuint8mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_m(vbool32_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_m(vbool32_t vm,
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_m(vbool32_t vm,
vbfloat16mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_m(vm, vs2,
@@ -1009,7 +1009,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_m(vbool32_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_m(vbool16_t vm,
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_m(vbool16_t vm,
vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -1022,7 +1022,7 @@ vuint8mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_m(vbool16_t vm,
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_m(vbool16_t vm,
vbfloat16m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_m(vm, vs2,
@@ -1035,7 +1035,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_m(vbool8_t vm,
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_m(vbool8_t vm,
vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -1048,7 +1048,7 @@ vuint8m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_m(vbool8_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_m(vbool8_t vm,
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_m(vbool8_t vm,
vbfloat16m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -1061,7 +1061,7 @@ vuint8m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_m(vbool8_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_m(vbool4_t vm,
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_m(vbool4_t vm,
vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -1074,7 +1074,7 @@ vuint8m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_m(vbool4_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_m(vbool4_t vm,
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_m(vbool4_t vm,
vbfloat16m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -1087,7 +1087,7 @@ vuint8m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_m(vbool4_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_m(vbool2_t vm,
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_m(vbool2_t vm,
vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_m(vm, vs2, __RISCV_FRM_RNE,
@@ -1100,7 +1100,7 @@ vuint8m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_m(vbool2_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_m(vbool2_t vm,
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_m(vbool2_t vm,
vbfloat16m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_m(vm, vs2, __RISCV_FRM_RNE,

View File

@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4(vuint8mf8_t vs2, size_t vl) {
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4(vfloat8e4m3mf8_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4(vs2, vl);
}
@@ -23,7 +23,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4(vuint8mf8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2(vuint8mf4_t vs2, size_t vl) {
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2(vfloat8e4m3mf4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2(vs2, vl);
}
@@ -33,7 +33,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2(vuint8mf4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1(vuint8mf2_t vs2, size_t vl) {
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1(vfloat8e4m3mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1(vs2, vl);
}
@@ -43,7 +43,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1(vuint8mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2(vuint8m1_t vs2, size_t vl) {
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2(vfloat8e4m3m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2(vs2, vl);
}
@@ -53,7 +53,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2(vuint8m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4(vuint8m2_t vs2, size_t vl) {
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4(vfloat8e4m3m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4(vs2, vl);
}
@@ -63,7 +63,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4(vuint8m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8(vuint8m4_t vs2, size_t vl) {
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8(vfloat8e4m3m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8(vs2, vl);
}
@@ -74,7 +74,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8(vuint8m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_m(vbool64_t vm,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_m(vm, vs2, vl);
}
@@ -86,7 +86,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_m(vbool64_t vm,
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_m(vbool32_t vm,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_m(vm, vs2, vl);
}
@@ -98,7 +98,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_m(vbool32_t vm,
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_m(vbool16_t vm,
vuint8mf2_t vs2, size_t vl) {
vfloat8e4m3mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_m(vm, vs2, vl);
}
@@ -108,7 +108,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_m(vbool8_t vm, vfloat8e4m3m1_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_m(vm, vs2, vl);
}
@@ -119,7 +119,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_m(vbool4_t vm, vfloat8e4m3m2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_m(vm, vs2, vl);
}
@@ -130,7 +130,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_m(vbool2_t vm, vfloat8e4m3m4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_m(vm, vs2, vl);
}
@@ -141,7 +141,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4(vuint8mf8_t vs2, size_t vl) {
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4(vfloat8e5m2mf8_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4(vs2, vl);
}
@@ -151,7 +151,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4(vuint8mf8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2(vuint8mf4_t vs2, size_t vl) {
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2(vfloat8e5m2mf4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2(vs2, vl);
}
@@ -161,7 +161,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2(vuint8mf4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1(vuint8mf2_t vs2, size_t vl) {
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1(vfloat8e5m2mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1(vs2, vl);
}
@@ -171,7 +171,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1(vuint8mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2(vuint8m1_t vs2, size_t vl) {
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2(vfloat8e5m2m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2(vs2, vl);
}
@@ -181,7 +181,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2(vuint8m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4(vuint8m2_t vs2, size_t vl) {
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4(vfloat8e5m2m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4(vs2, vl);
}
@@ -191,7 +191,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4(vuint8m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8(vuint8m4_t vs2, size_t vl) {
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8(vfloat8e5m2m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8(vs2, vl);
}
@@ -202,7 +202,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8(vuint8m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_m(vbool64_t vm,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_m(vm, vs2, vl);
}
@@ -214,7 +214,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_m(vbool64_t vm,
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_m(vbool32_t vm,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_m(vm, vs2, vl);
}
@@ -226,7 +226,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_m(vbool32_t vm,
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_m(vbool16_t vm,
vuint8mf2_t vs2, size_t vl) {
vfloat8e5m2mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_m(vm, vs2, vl);
}
@@ -236,7 +236,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_m(vbool8_t vm, vfloat8e5m2m1_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_m(vm, vs2, vl);
}
@@ -247,7 +247,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_m(vbool4_t vm, vfloat8e5m2m2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_m(vm, vs2, vl);
}
@@ -258,7 +258,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_m(vbool2_t vm, vfloat8e5m2m4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_m(vm, vs2, vl);
}

View File

@@ -13,7 +13,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, vl);
}
@@ -23,7 +23,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, vl);
}
@@ -33,7 +33,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, vl);
}
@@ -43,7 +43,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, vl);
}
@@ -53,7 +53,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, vl);
}
@@ -63,7 +63,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, vl);
}
@@ -73,7 +73,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, vl);
}
@@ -83,7 +83,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_sat_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, vl);
}
@@ -93,7 +93,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, vl);
}
@@ -103,7 +103,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_sat_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, vl);
}
@@ -113,7 +113,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e4m3mf8_t test_vfncvt_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, vl);
}
@@ -124,7 +124,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, vl);
}
@@ -135,7 +135,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e4m3mf4_t test_vfncvt_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, vl);
}
@@ -146,7 +146,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, vl);
}
@@ -157,7 +157,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e4m3mf2_t test_vfncvt_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, vl);
}
@@ -168,7 +168,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, vl);
}
@@ -179,7 +179,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e4m3m1_t test_vfncvt_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, vl);
}
@@ -190,7 +190,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e4m3m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, vl);
}
@@ -201,7 +201,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e4m3m2_t test_vfncvt_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, vl);
}
@@ -212,7 +212,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e4m3m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, vl);
}
@@ -223,7 +223,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -233,7 +233,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -243,7 +243,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -253,7 +253,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -263,7 +263,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -273,7 +273,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -283,7 +283,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -293,7 +293,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
vfloat8e4m3m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -303,7 +303,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -313,7 +313,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
vfloat8e4m3m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vs2, __RISCV_FRM_RNE, vl);
}
@@ -323,7 +323,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e4m3mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -334,7 +334,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm,
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm,
vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -345,7 +345,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e4m3mf8_rm_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e4m3mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -356,7 +356,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -367,7 +367,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e4m3mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e4m3mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -378,7 +378,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -389,7 +389,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e4m3mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e4m3m1_t test_vfncvt_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -400,7 +400,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e4m3m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -411,7 +411,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e4m3m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e4m3m2_t test_vfncvt_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -422,7 +422,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e4m3m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e4m3(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -433,7 +433,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e4m3m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, vl);
}
@@ -443,7 +443,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, vl);
}
@@ -453,7 +453,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, vl);
}
@@ -463,7 +463,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, vl);
}
@@ -473,7 +473,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, vl);
}
@@ -483,7 +483,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, vl);
}
@@ -493,7 +493,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, vl);
}
@@ -503,7 +503,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_sat_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, vl);
}
@@ -513,7 +513,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, vl);
}
@@ -523,7 +523,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_sat_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, vl);
}
@@ -533,7 +533,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e5m2mf8_t test_vfncvt_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, vl);
}
@@ -544,7 +544,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, vl);
}
@@ -555,7 +555,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e5m2mf4_t test_vfncvt_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, vl);
}
@@ -566,7 +566,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, vl);
}
@@ -577,7 +577,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e5m2mf2_t test_vfncvt_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, vl);
}
@@ -588,7 +588,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, vl);
}
@@ -599,7 +599,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e5m2m1_t test_vfncvt_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, vl);
}
@@ -610,7 +610,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e5m2m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, vl);
}
@@ -621,7 +621,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e5m2m2_t test_vfncvt_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, vl);
}
@@ -632,7 +632,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e5m2m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, vl);
}
@@ -643,7 +643,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -653,7 +653,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -663,7 +663,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm(vfloat32mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -673,7 +673,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -683,7 +683,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm(vfloat32m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -693,7 +693,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -703,7 +703,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm(vfloat32m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -713,7 +713,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
vfloat8e5m2m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -723,7 +723,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm(vfloat32m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -733,7 +733,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
vfloat8e5m2m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vs2, __RISCV_FRM_RNE, vl);
}
@@ -743,7 +743,7 @@ vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm(vfloat32m8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
vfloat8e5m2mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -754,7 +754,7 @@ vuint8mf8_t test_vfncvt_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm,
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm,
vfloat32mf2_t vs2, size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -765,7 +765,7 @@ vuint8mf8_t test_vfncvt_sat_f_f_q_f8e5m2mf8_rm_m(vbool64_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e5m2mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -776,7 +776,7 @@ vuint8mf4_t test_vfncvt_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -787,7 +787,7 @@ vuint8mf4_t test_vfncvt_sat_f_f_q_f8e5m2mf4_rm_m(vbool32_t vm, vfloat32m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e5m2mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -798,7 +798,7 @@ vuint8mf2_t test_vfncvt_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -809,7 +809,7 @@ vuint8mf2_t test_vfncvt_sat_f_f_q_f8e5m2mf2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e5m2m1_t test_vfncvt_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -820,7 +820,7 @@ vuint8m1_t test_vfncvt_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
vfloat8e5m2m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -831,7 +831,7 @@ vuint8m1_t test_vfncvt_sat_f_f_q_f8e5m2m1_rm_m(vbool8_t vm, vfloat32m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e5m2m2_t test_vfncvt_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}
@@ -842,7 +842,7 @@ vuint8m2_t test_vfncvt_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
vfloat8e5m2m2_t test_vfncvt_sat_f_f_q_f8e5m2m2_rm_m(vbool4_t vm, vfloat32m8_t vs2,
size_t vl) {
return __riscv_vfncvt_sat_f_f8e5m2(vm, vs2, __RISCV_FRM_RNE, vl);
}

View File

@@ -13,8 +13,8 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4(vuint8mf8_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vs2, vl);
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4(vfloat8e4m3mf8_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2(
@@ -23,8 +23,8 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4(vuint8mf8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2(vuint8mf4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vs2, vl);
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2(vfloat8e4m3mf4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1(
@@ -33,8 +33,8 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2(vuint8mf4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1(vuint8mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vs2, vl);
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1(vfloat8e4m3mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2(
@@ -43,8 +43,8 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1(vuint8mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2(vuint8m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vs2, vl);
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2(vfloat8e4m3m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4(
@@ -53,8 +53,8 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2(vuint8m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4(vuint8m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vs2, vl);
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4(vfloat8e4m3m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8(
@@ -63,8 +63,8 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4(vuint8m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8(vuint8m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vs2, vl);
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8(vfloat8e4m3m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_m(
@@ -74,9 +74,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8(vuint8m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_m(vbool64_t vm,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_m(
@@ -86,9 +86,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_m(vbool64_t vm,
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_m(vbool32_t vm,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_m(
@@ -98,8 +98,8 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_m(vbool32_t vm,
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_m(vbool16_t vm,
vuint8mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vm, vs2, vl);
vfloat8e4m3mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_m(
@@ -108,9 +108,9 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_m(vbool8_t vm, vfloat8e4m3m1_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_m(
@@ -119,9 +119,9 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_m(vbool4_t vm, vfloat8e4m3m2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_m(
@@ -130,9 +130,9 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_m(vbool2_t vm, vfloat8e4m3m4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4(
@@ -141,8 +141,8 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4(vuint8mf8_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vs2, vl);
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4(vfloat8e5m2mf8_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2(
@@ -151,8 +151,8 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4(vuint8mf8_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2(vuint8mf4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vs2, vl);
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2(vfloat8e5m2mf4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1(
@@ -161,8 +161,8 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2(vuint8mf4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1(vuint8mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vs2, vl);
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1(vfloat8e5m2mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2(
@@ -171,8 +171,8 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1(vuint8mf2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2(vuint8m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vs2, vl);
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2(vfloat8e5m2m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4(
@@ -181,8 +181,8 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2(vuint8m1_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4(vuint8m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vs2, vl);
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4(vfloat8e5m2m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8(
@@ -191,8 +191,8 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4(vuint8m2_t vs2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8(vuint8m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vs2, vl);
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8(vfloat8e5m2m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_m(
@@ -202,9 +202,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8(vuint8m4_t vs2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_m(vbool64_t vm,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_m(
@@ -214,9 +214,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_m(vbool64_t vm,
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_m(vbool32_t vm,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_m(
@@ -226,8 +226,8 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_m(vbool32_t vm,
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_m(vbool16_t vm,
vuint8mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vm, vs2, vl);
vfloat8e5m2mf2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_m(
@@ -236,9 +236,9 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_m(vbool16_t vm,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_m(vbool8_t vm, vfloat8e5m2m1_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_m(
@@ -247,9 +247,9 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_m(vbool4_t vm, vfloat8e5m2m2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_m(
@@ -258,7 +258,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_m(vbool2_t vm, vfloat8e5m2m4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16(vm, vs2, vl);
return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
}

View File

@@ -14,7 +14,7 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tu(vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tu(vd, vs2, vl);
}
@@ -26,7 +26,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tu(vbfloat16mf4_t vd,
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(vd, vs2, vl);
}
@@ -38,7 +38,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(vbfloat16mf2_t vd,
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e4m3mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(vd, vs2, vl);
}
@@ -50,7 +50,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(vbfloat16m1_t vd,
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
vfloat8e4m3m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(vd, vs2, vl);
}
@@ -61,7 +61,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(vbfloat16m2_t vd,
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
vfloat8e4m3m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(vd, vs2, vl);
}
@@ -72,7 +72,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(vbfloat16m4_t vd,
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
vfloat8e4m3m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(vd, vs2, vl);
}
@@ -84,7 +84,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(vbfloat16m8_t vd,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(vm, vd, vs2, vl);
}
@@ -97,7 +97,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(vm, vd, vs2, vl);
}
@@ -110,7 +110,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e4m3mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(vm, vd, vs2, vl);
}
@@ -123,7 +123,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
vfloat8e4m3m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(vm, vd, vs2, vl);
}
@@ -135,7 +135,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
vfloat8e4m3m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(vm, vd, vs2, vl);
}
@@ -147,7 +147,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
vfloat8e4m3m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(vm, vd, vs2, vl);
}
@@ -159,7 +159,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(vbool2_t vm,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(vm, vd, vs2, vl);
}
@@ -172,7 +172,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(vm, vd, vs2, vl);
}
@@ -185,7 +185,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e4m3mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(vm, vd, vs2, vl);
}
@@ -198,7 +198,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2,
vfloat8e4m3m1_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(vm, vd, vs2, vl);
}
@@ -211,7 +211,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2,
vfloat8e4m3m2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(vm, vd, vs2, vl);
}
@@ -224,7 +224,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2,
vfloat8e4m3m4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(vm, vd, vs2, vl);
}
@@ -237,7 +237,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(vbool2_t vm,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(vm, vd, vs2, vl);
}
@@ -250,7 +250,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(vm, vd, vs2, vl);
}
@@ -263,7 +263,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e4m3mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(vm, vd, vs2, vl);
}
@@ -276,7 +276,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
vfloat8e4m3m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(vm, vd, vs2, vl);
}
@@ -288,7 +288,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
vfloat8e4m3m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(vm, vd, vs2, vl);
}
@@ -300,7 +300,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
vfloat8e4m3m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(vm, vd, vs2, vl);
}
@@ -311,7 +311,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(vbool2_t vm,
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(vd, vs2, vl);
}
@@ -323,7 +323,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(vbfloat16mf4_t vd,
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(vd, vs2, vl);
}
@@ -335,7 +335,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(vbfloat16mf2_t vd,
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e5m2mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(vd, vs2, vl);
}
@@ -347,7 +347,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(vbfloat16m1_t vd,
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
vfloat8e5m2m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(vd, vs2, vl);
}
@@ -358,7 +358,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(vbfloat16m2_t vd,
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
vfloat8e5m2m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(vd, vs2, vl);
}
@@ -369,7 +369,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(vbfloat16m4_t vd,
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
vfloat8e5m2m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(vd, vs2, vl);
}
@@ -381,7 +381,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(vbfloat16m8_t vd,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(vm, vd, vs2, vl);
}
@@ -394,7 +394,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(vm, vd, vs2, vl);
}
@@ -407,7 +407,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e5m2mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(vm, vd, vs2, vl);
}
@@ -420,7 +420,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
vfloat8e5m2m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(vm, vd, vs2, vl);
}
@@ -432,7 +432,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
vfloat8e5m2m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(vm, vd, vs2, vl);
}
@@ -444,7 +444,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
vfloat8e5m2m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(vm, vd, vs2, vl);
}
@@ -456,7 +456,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(vbool2_t vm,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(vm, vd, vs2, vl);
}
@@ -469,7 +469,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(vm, vd, vs2, vl);
}
@@ -482,7 +482,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e5m2mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(vm, vd, vs2, vl);
}
@@ -495,7 +495,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2,
vfloat8e5m2m1_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(vm, vd, vs2, vl);
}
@@ -508,7 +508,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2,
vfloat8e5m2m2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(vm, vd, vs2, vl);
}
@@ -521,7 +521,7 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2,
vfloat8e5m2m4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(vm, vd, vs2, vl);
}
@@ -534,7 +534,7 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(vbool2_t vm,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(vm, vd, vs2, vl);
}
@@ -547,7 +547,7 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(vm, vd, vs2, vl);
}
@@ -560,7 +560,7 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e5m2mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(vm, vd, vs2, vl);
}
@@ -573,7 +573,7 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
vfloat8e5m2m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(vm, vd, vs2, vl);
}
@@ -585,7 +585,7 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
vfloat8e5m2m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(vm, vd, vs2, vl);
}
@@ -597,6 +597,6 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_mu(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
vfloat8e5m2m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_mu(vm, vd, vs2, vl);
}

View File

@@ -14,9 +14,9 @@
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tu(vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tu(vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(
@@ -26,9 +26,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tu(vbfloat16mf4_t vd,
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tu(vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(
@@ -38,9 +38,9 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(vbfloat16mf2_t vd,
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e4m3mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tu(vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(
@@ -50,8 +50,8 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(vbfloat16m1_t vd,
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tu(vd, vs2, vl);
vfloat8e4m3m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(
@@ -61,8 +61,8 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(vbfloat16m2_t vd,
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tu(vd, vs2, vl);
vfloat8e4m3m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(
@@ -72,8 +72,8 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(vbfloat16m4_t vd,
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tu(vd, vs2, vl);
vfloat8e4m3m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(
@@ -84,9 +84,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(vbfloat16m8_t vd,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tum(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(
@@ -97,9 +97,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tum(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(
@@ -110,9 +110,9 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e4m3mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tum(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(
@@ -123,8 +123,8 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tum(vm, vd, vs2, vl);
vfloat8e4m3m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(
@@ -135,8 +135,8 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tum(vm, vd, vs2, vl);
vfloat8e4m3m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(
@@ -147,8 +147,8 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tum(vm, vd, vs2, vl);
vfloat8e4m3m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(
@@ -159,9 +159,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(vbool2_t vm,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(
@@ -172,9 +172,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(
@@ -185,9 +185,9 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e4m3mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(
@@ -198,9 +198,9 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2,
vfloat8e4m3m1_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(
@@ -211,9 +211,9 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2,
vfloat8e4m3m2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(
@@ -224,9 +224,9 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2,
vfloat8e4m3m4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(
@@ -237,9 +237,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(vbool2_t vm,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e4m3mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_mu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(
@@ -250,9 +250,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e4m3mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_mu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(
@@ -263,9 +263,9 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e4m3mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_mu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(
@@ -276,8 +276,8 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_mu(vm, vd, vs2, vl);
vfloat8e4m3m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(
@@ -288,8 +288,8 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_mu(vm, vd, vs2, vl);
vfloat8e4m3m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(
@@ -300,8 +300,8 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e4m3_bf16_mu(vm, vd, vs2, vl);
vfloat8e4m3m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(
@@ -311,9 +311,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(vbool2_t vm,
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tu(vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(
@@ -323,9 +323,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(vbfloat16mf4_t vd,
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tu(vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(
@@ -335,9 +335,9 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(vbfloat16mf2_t vd,
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e5m2mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tu(vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(
@@ -347,8 +347,8 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(vbfloat16m1_t vd,
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tu(vd, vs2, vl);
vfloat8e5m2m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(
@@ -358,8 +358,8 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(vbfloat16m2_t vd,
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tu(vd, vs2, vl);
vfloat8e5m2m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(
@@ -369,8 +369,8 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(vbfloat16m4_t vd,
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tu(vd, vs2, vl);
vfloat8e5m2m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(
@@ -381,9 +381,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(vbfloat16m8_t vd,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tum(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(
@@ -394,9 +394,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tum(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(
@@ -407,9 +407,9 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e5m2mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tum(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(
@@ -420,8 +420,8 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tum(vm, vd, vs2, vl);
vfloat8e5m2m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(
@@ -432,8 +432,8 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tum(vm, vd, vs2, vl);
vfloat8e5m2m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(
@@ -444,8 +444,8 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tum(vm, vd, vs2, vl);
vfloat8e5m2m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(
@@ -456,9 +456,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(vbool2_t vm,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(
@@ -469,9 +469,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(
@@ -482,9 +482,9 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e5m2mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(
@@ -495,9 +495,9 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2,
vfloat8e5m2m1_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(
@@ -508,9 +508,9 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2,
vfloat8e5m2m2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(
@@ -521,9 +521,9 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2,
vfloat8e5m2m4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_tumu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(
@@ -534,9 +534,9 @@ vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(vbool2_t vm,
//
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(vbool64_t vm,
vbfloat16mf4_t vd,
vuint8mf8_t vs2,
vfloat8e5m2mf8_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_mu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(
@@ -547,9 +547,9 @@ vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(vbool64_t vm,
//
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(vbool32_t vm,
vbfloat16mf2_t vd,
vuint8mf4_t vs2,
vfloat8e5m2mf4_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_mu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(
@@ -560,9 +560,9 @@ vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(vbool32_t vm,
//
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(vbool16_t vm,
vbfloat16m1_t vd,
vuint8mf2_t vs2,
vfloat8e5m2mf2_t vs2,
size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_mu(vm, vd, vs2, vl);
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(
@@ -573,8 +573,8 @@ vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(vbool16_t vm,
//
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(vbool8_t vm,
vbfloat16m2_t vd,
vuint8m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_mu(vm, vd, vs2, vl);
vfloat8e5m2m1_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(
@@ -585,8 +585,8 @@ vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(vbool8_t vm,
//
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(vbool4_t vm,
vbfloat16m4_t vd,
vuint8m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_mu(vm, vd, vs2, vl);
vfloat8e5m2m2_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_mu(
@@ -597,6 +597,6 @@ vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(vbool4_t vm,
//
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_mu(vbool2_t vm,
vbfloat16m8_t vd,
vuint8m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_f8e5m2_bf16_mu(vm, vd, vs2, vl);
vfloat8e5m2m4_t vs2, size_t vl) {
return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
}

View File

@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfh -target-feature +zvfbfmin \
// RUN: -O0 -emit-llvm %s -o - | FileCheck %s
// RUN: -target-feature +experimental-zvfofp8min -O0 -emit-llvm %s -o - | FileCheck %s
#include <riscv_vector.h>
@@ -720,3 +720,39 @@ void foo () {
vbfloat16m4x2_t bf16m4x2;
}
// CHECK-LABEL: define dso_local void @_Z14test_ofp8_typev
// CHECK-NEXT: entry:
// CHECK-NEXT: [[E4M3MF8:%.*]] = alloca <vscale x 1 x i8>, align 1
// CHECK-NEXT: [[E4M3MF4:%.*]] = alloca <vscale x 2 x i8>, align 1
// CHECK-NEXT: [[E4M3MF2:%.*]] = alloca <vscale x 4 x i8>, align 1
// CHECK-NEXT: [[E4M3M1:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT: [[E4M3M2:%.*]] = alloca <vscale x 16 x i8>, align 1
// CHECK-NEXT: [[E4M3M4:%.*]] = alloca <vscale x 32 x i8>, align 1
// CHECK-NEXT: [[E4M3M8:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT: [[E5M2MF8:%.*]] = alloca <vscale x 1 x i8>, align 1
// CHECK-NEXT: [[E5M2MF4:%.*]] = alloca <vscale x 2 x i8>, align 1
// CHECK-NEXT: [[E5M2MF2:%.*]] = alloca <vscale x 4 x i8>, align 1
// CHECK-NEXT: [[E5M2M1:%.*]] = alloca <vscale x 8 x i8>, align 1
// CHECK-NEXT: [[E5M2M2:%.*]] = alloca <vscale x 16 x i8>, align 1
// CHECK-NEXT: [[E5M2M4:%.*]] = alloca <vscale x 32 x i8>, align 1
// CHECK-NEXT: [[E5M2M8:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT: ret void
void test_ofp8_type() {
// ofp8 e4m3
vfloat8e4m3mf8_t e4m3mf8;
vfloat8e4m3mf4_t e4m3mf4;
vfloat8e4m3mf2_t e4m3mf2;
vfloat8e4m3m1_t e4m3m1;
vfloat8e4m3m2_t e4m3m2;
vfloat8e4m3m4_t e4m3m4;
vfloat8e4m3m8_t e4m3m8;
// ofp8 e5m2
vfloat8e5m2mf8_t e5m2mf8;
vfloat8e5m2mf4_t e5m2mf4;
vfloat8e5m2mf2_t e5m2mf2;
vfloat8e5m2m1_t e5m2m1;
vfloat8e5m2m2_t e5m2m2;
vfloat8e5m2m4_t e5m2m4;
vfloat8e5m2m8_t e5m2m8;
}

View File

@@ -0,0 +1,62 @@
// RUN: %clang_cc1 -triple riscv32-none-linux-gnu %s -emit-llvm -o - \
// RUN: -target-feature +experimental-zvfofp8min -target-feature +zve64x | FileCheck %s
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu %s -emit-llvm -o - \
// RUN: -target-feature +experimental-zvfofp8min -target-feature +zve64x | FileCheck %s
typedef __rvv_float8e4m3mf8_t vfloat8e4m3mf8_t;
typedef __rvv_float8e4m3mf4_t vfloat8e4m3mf4_t;
typedef __rvv_float8e4m3mf2_t vfloat8e4m3mf2_t;
typedef __rvv_float8e4m3m1_t vfloat8e4m3m1_t;
typedef __rvv_float8e4m3m2_t vfloat8e4m3m2_t;
typedef __rvv_float8e4m3m4_t vfloat8e4m3m4_t;
typedef __rvv_float8e4m3m8_t vfloat8e4m3m8_t;
typedef __rvv_float8e5m2mf8_t vfloat8e5m2mf8_t;
typedef __rvv_float8e5m2mf4_t vfloat8e5m2mf4_t;
typedef __rvv_float8e5m2mf2_t vfloat8e5m2mf2_t;
typedef __rvv_float8e5m2m1_t vfloat8e5m2m1_t;
typedef __rvv_float8e5m2m2_t vfloat8e5m2m2_t;
typedef __rvv_float8e5m2m4_t vfloat8e5m2m4_t;
typedef __rvv_float8e5m2m8_t vfloat8e5m2m8_t;
// Each stub below takes one E4M3 vector type by value; the CHECK line above
// it pins the Itanium mangling, which encodes the builtin as a
// vendor-extended type: 'u' + <name length> + <type name>
// (e.g. u21__rvv_float8e4m3mf8_t — 21 characters).
// CHECK: _Z8fe4m3mf8u21__rvv_float8e4m3mf8_t
void fe4m3mf8(vfloat8e4m3mf8_t) {}
// CHECK: _Z8fe4m3mf4u21__rvv_float8e4m3mf4_t
void fe4m3mf4(vfloat8e4m3mf4_t) {}
// CHECK: _Z8fe4m3mf2u21__rvv_float8e4m3mf2_t
void fe4m3mf2(vfloat8e4m3mf2_t) {}
// CHECK: _Z7fe4m3m1u20__rvv_float8e4m3m1_t
void fe4m3m1(vfloat8e4m3m1_t) {}
// CHECK: _Z7fe4m3m2u20__rvv_float8e4m3m2_t
void fe4m3m2(vfloat8e4m3m2_t) {}
// CHECK: _Z7fe4m3m4u20__rvv_float8e4m3m4_t
void fe4m3m4(vfloat8e4m3m4_t) {}
// CHECK: _Z7fe4m3m8u20__rvv_float8e4m3m8_t
void fe4m3m8(vfloat8e4m3m8_t) {}
// Same as the E4M3 stubs above, for the E5M2 variants: each CHECK line pins
// the vendor-extended Itanium mangling ('u' + <name length> + <type name>)
// of the corresponding __rvv_float8e5m2* builtin type.
// CHECK: _Z8fe5m2mf8u21__rvv_float8e5m2mf8_t
void fe5m2mf8(vfloat8e5m2mf8_t) {}
// CHECK: _Z8fe5m2mf4u21__rvv_float8e5m2mf4_t
void fe5m2mf4(vfloat8e5m2mf4_t) {}
// CHECK: _Z8fe5m2mf2u21__rvv_float8e5m2mf2_t
void fe5m2mf2(vfloat8e5m2mf2_t) {}
// CHECK: _Z7fe5m2m1u20__rvv_float8e5m2m1_t
void fe5m2m1(vfloat8e5m2m1_t) {}
// CHECK: _Z7fe5m2m2u20__rvv_float8e5m2m2_t
void fe5m2m2(vfloat8e5m2m2_t) {}
// CHECK: _Z7fe5m2m4u20__rvv_float8e5m2m4_t
void fe5m2m4(vfloat8e5m2m4_t) {}
// CHECK: _Z7fe5m2m8u20__rvv_float8e5m2m8_t
void fe5m2m8(vfloat8e5m2m8_t) {}

View File

@@ -1,5 +1,6 @@
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN: -target-feature +zvfh -target-feature +zvfbfmin -ast-print %s | FileCheck %s
// RUN: -target-feature +zvfh -target-feature +zvfbfmin \
// RUN: -target-feature +experimental-zvfofp8min -ast-print %s | FileCheck %s
void bar(void) {
// CHECK: __rvv_int64m1_t x0;
@@ -155,6 +156,48 @@ void bar(void) {
// CHECK: __rvv_bfloat16mf2_t x50;
__rvv_bfloat16mf2_t x50;
// CHECK: __rvv_float8e4m3mf8_t x51;
__rvv_float8e4m3mf8_t x51;
// CHECK: __rvv_float8e4m3mf4_t x52;
__rvv_float8e4m3mf4_t x52;
// CHECK: __rvv_float8e4m3mf2_t x53;
__rvv_float8e4m3mf2_t x53;
// CHECK: __rvv_float8e4m3m1_t x54;
__rvv_float8e4m3m1_t x54;
// CHECK: __rvv_float8e4m3m2_t x55;
__rvv_float8e4m3m2_t x55;
// CHECK: __rvv_float8e4m3m4_t x56;
__rvv_float8e4m3m4_t x56;
// CHECK: __rvv_float8e4m3m8_t x57;
__rvv_float8e4m3m8_t x57;
// CHECK: __rvv_float8e5m2mf8_t x58;
__rvv_float8e5m2mf8_t x58;
// CHECK: __rvv_float8e5m2mf4_t x59;
__rvv_float8e5m2mf4_t x59;
// CHECK: __rvv_float8e5m2mf2_t x60;
__rvv_float8e5m2mf2_t x60;
// CHECK: __rvv_float8e5m2m1_t x61;
__rvv_float8e5m2m1_t x61;
// CHECK: __rvv_float8e5m2m2_t x62;
__rvv_float8e5m2m2_t x62;
// CHECK: __rvv_float8e5m2m4_t x63;
__rvv_float8e5m2m4_t x63;
// CHECK: __rvv_float8e5m2m8_t x64;
__rvv_float8e5m2m8_t x64;
}
typedef __rvv_bool4_t vbool4_t;

View File

@@ -496,6 +496,15 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
}
}
// TODO: Support tuple types for ofp8?
for (BasicType BT : {BasicType::F8E4M3, BasicType::F8E5M2}) {
for (int Log2LMUL : Log2LMULs) {
auto T = TypeCache.computeType(BT, Log2LMUL, PrototypeDescriptor::Vector);
if (T)
printType(*T);
}
}
OS << "\n#ifdef __cplusplus\n";
OS << "}\n";
OS << "#endif // __cplusplus\n";