There is currently no OFP8 scalar type in either the Clang or LLVM type system, so the OFP8 RVV vector types are lowered to i8 LLVM vector types for now. Only the Clang-level types are introduced, because of how the intrinsics are defined: if the Clang types also used uint8 vector types, the E4M3 and E5M2 formats could not be distinguished, and an additional type suffix would have to be appended to every intrinsic name. Intrinsic spec update PR: https://github.com/riscv-non-isa/riscv-rvv-intrinsic-doc/pull/432. vreinterpret intrinsic PR: https://github.com/llvm/llvm-project/pull/191626. DO NOT MERGE: the intrinsic spec PR must land first before the zvfofp8min change can be made.
2479 lines
172 KiB
C
2479 lines
172 KiB
C
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfbfmin \
// RUN:   -target-feature +experimental-zvfofp8min -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
|
|
|
|
// Tail-undisturbed (_tu) variants of the unmasked bf16 -> OFP8 (E4M3)
// narrowing convert.  Since no OFP8 scalar type exists in LLVM yet, the
// vfloat8e4m3* result types are lowered to plain i8 vectors; the intrinsic
// takes the rounding mode as i64 7 (dynamic frm) and vd supplies the tail
// elements.  CHECK lines are autogenerated -- regenerate with
// update_cc_test_checks.py rather than editing them by hand.

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tu(
// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tu(vfloat8e4m3mf8_t vd,
                                                        vbfloat16mf4_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tu(
// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tu(vfloat8e4m3mf8_t vd,
                                                            vbfloat16mf4_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tu(
// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tu(vfloat8e4m3mf4_t vd,
                                                        vbfloat16mf2_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tu(
// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tu(vfloat8e4m3mf4_t vd,
                                                            vbfloat16mf2_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tu(
// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tu(vfloat8e4m3mf2_t vd,
                                                       vbfloat16m1_t vs2,
                                                       size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tu(
// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tu(vfloat8e4m3mf2_t vd,
                                                           vbfloat16m1_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e4m3m1_tu(
// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_tu(vfloat8e4m3m1_t vd,
                                                     vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tu(
// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tu(vfloat8e4m3m1_t vd,
                                                         vbfloat16m2_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e4m3m2_tu(
// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_tu(vfloat8e4m3m2_t vd,
                                                     vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tu(
// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tu(vfloat8e4m3m2_t vd,
                                                         vbfloat16m4_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e4m3m4_tu(
// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_tu(vfloat8e4m3m4_t vd,
                                                     vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tu(
// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tu(vfloat8e4m3m4_t vd,
                                                         vbfloat16m8_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tu(vd, vs2, vl);
}
|
|
|
|
// Masked tail-undisturbed (_tum) variants: the mask vm is the first operand
// at the C level but is passed third to the .mask intrinsic, followed by
// frm = 7 (dynamic) and the policy operand i64 2 (tail undisturbed, mask
// agnostic).  CHECK lines are autogenerated by update_cc_test_checks.py.

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tum(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tum(vbool64_t vm,
                                                         vfloat8e4m3mf8_t vd,
                                                         vbfloat16mf4_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tum(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tum(vbool64_t vm,
                                                             vfloat8e4m3mf8_t vd,
                                                             vbfloat16mf4_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tum(vbool32_t vm,
                                                         vfloat8e4m3mf4_t vd,
                                                         vbfloat16mf2_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tum(vbool32_t vm,
                                                             vfloat8e4m3mf4_t vd,
                                                             vbfloat16mf2_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tum(vbool16_t vm, vfloat8e4m3mf2_t vd,
                                                        vbfloat16m1_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tum(vbool16_t vm,
                                                            vfloat8e4m3mf2_t vd,
                                                            vbfloat16m1_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e4m3m1_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_tum(vbool8_t vm, vfloat8e4m3m1_t vd,
                                                      vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tum(vbool8_t vm, vfloat8e4m3m1_t vd,
                                                          vbfloat16m2_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e4m3m2_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_tum(vbool4_t vm, vfloat8e4m3m2_t vd,
                                                      vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tum(vbool4_t vm, vfloat8e4m3m2_t vd,
                                                          vbfloat16m4_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e4m3m4_tum(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_tum(vbool2_t vm, vfloat8e4m3m4_t vd,
                                                      vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tum(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tum(vbool2_t vm, vfloat8e4m3m4_t vd,
                                                          vbfloat16m8_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tum(vm, vd, vs2, vl);
}
|
|
|
|
// Masked tail-undisturbed, mask-undisturbed (_tumu) variants: identical to
// the _tum tests except the policy operand is i64 0 (tail undisturbed,
// mask undisturbed).  CHECK lines are autogenerated by
// update_cc_test_checks.py.

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tumu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tumu(vbool64_t vm,
                                                          vfloat8e4m3mf8_t vd,
                                                          vbfloat16mf4_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tumu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tumu(vbool64_t vm,
                                                              vfloat8e4m3mf8_t vd,
                                                              vbfloat16mf4_t vs2,
                                                              size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tumu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tumu(vbool32_t vm,
                                                          vfloat8e4m3mf4_t vd,
                                                          vbfloat16mf2_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tumu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tumu(vbool32_t vm,
                                                              vfloat8e4m3mf4_t vd,
                                                              vbfloat16mf2_t vs2,
                                                              size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tumu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tumu(vbool16_t vm,
                                                         vfloat8e4m3mf2_t vd,
                                                         vbfloat16m1_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tumu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tumu(vbool16_t vm,
                                                             vfloat8e4m3mf2_t vd,
                                                             vbfloat16m1_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e4m3m1_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_tumu(vbool8_t vm, vfloat8e4m3m1_t vd,
                                                       vbfloat16m2_t vs2,
                                                       size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tumu(vbool8_t vm,
                                                           vfloat8e4m3m1_t vd,
                                                           vbfloat16m2_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e4m3m2_tumu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_tumu(vbool4_t vm, vfloat8e4m3m2_t vd,
                                                       vbfloat16m4_t vs2,
                                                       size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tumu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tumu(vbool4_t vm,
                                                           vfloat8e4m3m2_t vd,
                                                           vbfloat16m4_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e4m3m4_tumu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_tumu(vbool2_t vm, vfloat8e4m3m4_t vd,
                                                       vbfloat16m8_t vs2,
                                                       size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_tumu(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tumu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tumu(vbool2_t vm,
                                                           vfloat8e4m3m4_t vd,
                                                           vbfloat16m8_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_tumu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_mu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_mu(vbool64_t vm, vfloat8e4m3mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_mu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_mu(vbool64_t vm,
|
|
vfloat8e4m3mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_mu(vbool32_t vm, vfloat8e4m3mf4_t vd,
|
|
vbfloat16mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _mu variant: masked bf16 -> OFP8(E4M3) narrowing convert; frm=7 (dynamic), policy=1 per the CHECK above.
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_mu(vbool32_t vm,
                                                            vfloat8e4m3mf4_t vd,
                                                            vbfloat16mf2_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
// _mu: masked bf16 -> OFP8(E4M3) narrowing convert; CHECK above shows frm=7 (dynamic) and policy=1 (tail-agnostic, mask-undisturbed).
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_mu(vbool16_t vm, vfloat8e4m3mf2_t vd,
                                                       vbfloat16m1_t vs2,
                                                       size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _mu variant: masked bf16 -> OFP8(E4M3) narrowing convert; frm=7 (dynamic), policy=1 per the CHECK above.
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_mu(vbool16_t vm,
                                                           vfloat8e4m3mf2_t vd,
                                                           vbfloat16m1_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e4m3m1_mu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
// _mu: masked bf16 -> OFP8(E4M3) narrowing convert; CHECK above shows frm=7 (dynamic) and policy=1 (tail-agnostic, mask-undisturbed).
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_mu(vbool8_t vm, vfloat8e4m3m1_t vd,
                                                     vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_mu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _mu variant: masked bf16 -> OFP8(E4M3) narrowing convert; frm=7 (dynamic), policy=1 per the CHECK above.
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_mu(vbool8_t vm, vfloat8e4m3m1_t vd,
                                                         vbfloat16m2_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e4m3m2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
// _mu: masked bf16 -> OFP8(E4M3) narrowing convert; CHECK above shows frm=7 (dynamic) and policy=1 (tail-agnostic, mask-undisturbed).
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_mu(vbool4_t vm, vfloat8e4m3m2_t vd,
                                                     vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _mu variant: masked bf16 -> OFP8(E4M3) narrowing convert; frm=7 (dynamic), policy=1 per the CHECK above.
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_mu(vbool4_t vm, vfloat8e4m3m2_t vd,
                                                         vbfloat16m4_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e4m3m4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
// _mu: masked bf16 -> OFP8(E4M3) narrowing convert; CHECK above shows frm=7 (dynamic) and policy=1 (tail-agnostic, mask-undisturbed).
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_mu(vbool2_t vm, vfloat8e4m3m4_t vd,
                                                     vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _mu variant: masked bf16 -> OFP8(E4M3) narrowing convert; frm=7 (dynamic), policy=1 per the CHECK above.
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_mu(vbool2_t vm, vfloat8e4m3m4_t vd,
                                                         vbfloat16m8_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tu: unmasked, tail-undisturbed, explicit rounding mode; __RISCV_FRM_RNE lowers to frm=0 in the CHECK above.
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tu(vfloat8e4m3mf8_t vd,
                                                           vbfloat16mf4_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE,
                                                      vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tu variant: unmasked, tail-undisturbed, explicit rounding mode (__RISCV_FRM_RNE -> frm=0).
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tu(vfloat8e4m3mf8_t vd,
                                                               vbfloat16mf4_t vs2,
                                                               size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tu(vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tu: unmasked, tail-undisturbed, explicit rounding mode; __RISCV_FRM_RNE lowers to frm=0 in the CHECK above.
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tu(vfloat8e4m3mf4_t vd,
                                                           vbfloat16mf2_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE,
                                                      vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tu variant: unmasked, tail-undisturbed, explicit rounding mode (__RISCV_FRM_RNE -> frm=0).
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tu(vfloat8e4m3mf4_t vd,
                                                               vbfloat16mf2_t vs2,
                                                               size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tu(vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tu: unmasked, tail-undisturbed, explicit rounding mode; __RISCV_FRM_RNE lowers to frm=0 in the CHECK above.
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tu(vfloat8e4m3mf2_t vd,
                                                          vbfloat16m1_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE,
                                                     vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tu variant: unmasked, tail-undisturbed, explicit rounding mode (__RISCV_FRM_RNE -> frm=0).
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tu(vfloat8e4m3mf2_t vd,
                                                              vbfloat16m1_t vs2,
                                                              size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tu(vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tu: unmasked, tail-undisturbed, explicit rounding mode; __RISCV_FRM_RNE lowers to frm=0 in the CHECK above.
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tu(vfloat8e4m3m1_t vd,
                                                        vbfloat16m2_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tu(vd, vs2, __RISCV_FRM_RNE,
                                                    vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tu variant: unmasked, tail-undisturbed, explicit rounding mode (__RISCV_FRM_RNE -> frm=0).
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tu(vfloat8e4m3m1_t vd,
                                                            vbfloat16m2_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tu(vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tu: unmasked, tail-undisturbed, explicit rounding mode; __RISCV_FRM_RNE lowers to frm=0 in the CHECK above.
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tu(vfloat8e4m3m2_t vd,
                                                        vbfloat16m4_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tu(vd, vs2, __RISCV_FRM_RNE,
                                                    vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tu variant: unmasked, tail-undisturbed, explicit rounding mode (__RISCV_FRM_RNE -> frm=0).
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tu(vfloat8e4m3m2_t vd,
                                                            vbfloat16m4_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tu(vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tu: unmasked, tail-undisturbed, explicit rounding mode; __RISCV_FRM_RNE lowers to frm=0 in the CHECK above.
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tu(vfloat8e4m3m4_t vd,
                                                        vbfloat16m8_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tu(vd, vs2, __RISCV_FRM_RNE,
                                                    vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tu variant: unmasked, tail-undisturbed, explicit rounding mode (__RISCV_FRM_RNE -> frm=0).
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tu(vfloat8e4m3m4_t vd,
                                                            vbfloat16m8_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tu(vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tum: masked, explicit rounding mode; CHECK above shows frm=0 and policy=2 (tail-undisturbed, mask-agnostic).
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tum(vbool64_t vm,
                                                            vfloat8e4m3mf8_t vd,
                                                            vbfloat16mf4_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tum(vm, vd, vs2,
                                                       __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tum variant: masked, explicit rounding mode; frm=0 and policy=2 per the CHECK above.
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tum(vbool64_t vm,
                                                                vfloat8e4m3mf8_t vd,
                                                                vbfloat16mf4_t vs2,
                                                                size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tum(vm, vd, vs2,
                                                           __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tum: masked, explicit rounding mode; CHECK above shows frm=0 and policy=2 (tail-undisturbed, mask-agnostic).
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tum(vbool32_t vm,
                                                            vfloat8e4m3mf4_t vd,
                                                            vbfloat16mf2_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tum(vm, vd, vs2,
                                                       __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tum variant: masked, explicit rounding mode; frm=0 and policy=2 per the CHECK above.
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tum(vbool32_t vm,
                                                                vfloat8e4m3mf4_t vd,
                                                                vbfloat16mf2_t vs2,
                                                                size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tum(vm, vd, vs2,
                                                           __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tum: masked, explicit rounding mode; CHECK above shows frm=0 and policy=2 (tail-undisturbed, mask-agnostic).
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tum(vbool16_t vm,
                                                           vfloat8e4m3mf2_t vd,
                                                           vbfloat16m1_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tum(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tum variant: masked, explicit rounding mode; frm=0 and policy=2 per the CHECK above.
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tum(vbool16_t vm,
                                                               vfloat8e4m3mf2_t vd,
                                                               vbfloat16m1_t vs2,
                                                               size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tum(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tum: masked, explicit rounding mode; CHECK above shows frm=0 and policy=2 (tail-undisturbed, mask-agnostic).
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tum(vbool8_t vm, vfloat8e4m3m1_t vd,
                                                         vbfloat16m2_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tum(vm, vd, vs2,
                                                     __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tum variant: masked, explicit rounding mode; frm=0 and policy=2 per the CHECK above.
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tum(vbool8_t vm,
                                                             vfloat8e4m3m1_t vd,
                                                             vbfloat16m2_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tum(vm, vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tum: masked, explicit rounding mode; CHECK above shows frm=0 and policy=2 (tail-undisturbed, mask-agnostic).
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tum(vbool4_t vm, vfloat8e4m3m2_t vd,
                                                         vbfloat16m4_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tum(vm, vd, vs2,
                                                     __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tum variant: masked, explicit rounding mode; frm=0 and policy=2 per the CHECK above.
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tum(vbool4_t vm,
                                                             vfloat8e4m3m2_t vd,
                                                             vbfloat16m4_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tum(vm, vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tum: masked, explicit rounding mode; CHECK above shows frm=0 and policy=2 (tail-undisturbed, mask-agnostic).
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tum(vbool2_t vm, vfloat8e4m3m4_t vd,
                                                         vbfloat16m8_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tum(vm, vd, vs2,
                                                     __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tum variant: masked, explicit rounding mode; frm=0 and policy=2 per the CHECK above.
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tum(vbool2_t vm,
                                                             vfloat8e4m3m4_t vd,
                                                             vbfloat16m8_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tum(vm, vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tumu: masked, explicit rounding mode; CHECK above shows frm=0 and policy=0 (tail- and mask-undisturbed).
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tumu(vbool64_t vm,
                                                             vfloat8e4m3mf8_t vd,
                                                             vbfloat16mf4_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_tumu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tumu variant: masked, explicit rounding mode; frm=0 and policy=0 per the CHECK above.
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tumu(vbool64_t vm,
                                                                 vfloat8e4m3mf8_t vd,
                                                                 vbfloat16mf4_t vs2,
                                                                 size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_tumu(
      vm, vd, vs2, __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tumu: masked, explicit rounding mode; CHECK above shows frm=0 and policy=0 (tail- and mask-undisturbed).
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tumu(vbool32_t vm,
                                                             vfloat8e4m3mf4_t vd,
                                                             vbfloat16mf2_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_tumu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tumu variant: masked, explicit rounding mode; frm=0 and policy=0 per the CHECK above.
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tumu(vbool32_t vm,
                                                                 vfloat8e4m3mf4_t vd,
                                                                 vbfloat16mf2_t vs2,
                                                                 size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_tumu(
      vm, vd, vs2, __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tumu: masked, explicit rounding mode; CHECK above shows frm=0 and policy=0 (tail- and mask-undisturbed).
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tumu(vbool16_t vm,
                                                            vfloat8e4m3mf2_t vd,
                                                            vbfloat16m1_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_tumu(vm, vd, vs2,
                                                       __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
// Saturating _rm_tumu variant: masked, explicit rounding mode; frm=0 and policy=0 per the CHECK above.
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tumu(vbool16_t vm,
                                                                vfloat8e4m3mf2_t vd,
                                                                vbfloat16m1_t vs2,
                                                                size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_tumu(vm, vd, vs2,
                                                           __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
// _rm_tumu: masked, explicit rounding mode; CHECK above shows frm=0 and policy=0 (tail- and mask-undisturbed).
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tumu(vbool8_t vm, vfloat8e4m3m1_t vd,
                                                          vbfloat16m2_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_tumu(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tumu(vbool8_t vm,
|
|
vfloat8e4m3m1_t vd,
|
|
vbfloat16m2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_tumu(vm, vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tumu(vbool4_t vm, vfloat8e4m3m2_t vd,
|
|
vbfloat16m4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_tumu(vm, vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tumu(vbool4_t vm,
|
|
vfloat8e4m3m2_t vd,
|
|
vbfloat16m4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_tumu(vm, vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tumu(vbool2_t vm, vfloat8e4m3m4_t vd,
|
|
vbfloat16m8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_tumu(vm, vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tumu(vbool2_t vm,
|
|
vfloat8e4m3m4_t vd,
|
|
vbfloat16m8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_tumu(vm, vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// ---------------------------------------------------------------------------
// Masked bf16 -> FP8(E4M3) narrowing conversions (plain and saturating) with
// an explicit rounding-mode operand, mask-undisturbed (_rm_mu) policy, for
// LMUL mf8 through m4 destinations.  CHECK lines autogenerated by
// update_cc_test_checks.py.
// ---------------------------------------------------------------------------
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e4m3mf8_t test_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_mu(
    vbool64_t vm, vfloat8e4m3mf8_t vd, vbfloat16mf4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e4m3mf8_rm_mu(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e4m3mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_mu(
    vbool64_t vm, vfloat8e4m3mf8_t vd, vbfloat16mf4_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e4m3mf8_rm_mu(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e4m3mf4_t test_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_mu(
    vbool32_t vm, vfloat8e4m3mf4_t vd, vbfloat16mf2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e4m3mf4_rm_mu(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e4m3mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_mu(
    vbool32_t vm, vfloat8e4m3mf4_t vd, vbfloat16mf2_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e4m3mf4_rm_mu(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e4m3mf2_t test_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_mu(
    vbool16_t vm, vfloat8e4m3mf2_t vd, vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e4m3mf2_rm_mu(vm, vd, vs2,
                                                     __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e4m3mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_mu(
    vbool16_t vm, vfloat8e4m3mf2_t vd, vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e4m3mf2_rm_mu(vm, vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e4m3m1_t test_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_mu(
    vbool8_t vm, vfloat8e4m3m1_t vd, vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e4m3m1_rm_mu(vm, vd, vs2,
                                                    __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e4m3m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_mu(
    vbool8_t vm, vfloat8e4m3m1_t vd, vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e4m3m1_rm_mu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_mu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e4m3m2_t test_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_mu(
    vbool4_t vm, vfloat8e4m3m2_t vd, vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e4m3m2_rm_mu(vm, vd, vs2,
                                                    __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_mu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e4m3m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_mu(
    vbool4_t vm, vfloat8e4m3m2_t vd, vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e4m3m2_rm_mu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_mu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e4m3m4_t test_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_mu(
    vbool2_t vm, vfloat8e4m3m4_t vd, vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e4m3m4_rm_mu(vm, vd, vs2,
                                                    __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_mu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e4m3m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_mu(
    vbool2_t vm, vfloat8e4m3m4_t vd, vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e4m3m4_rm_mu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// ---------------------------------------------------------------------------
// Unmasked bf16 -> FP8(E5M2) narrowing conversions (plain and saturating),
// tail-undisturbed (_tu) policy, for LMUL mf8 through m4 destinations.  E5M2
// lowers to the ".alt" intrinsic variants; the frm operand is 7 (dynamic,
// i.e. no explicit rounding mode).  CHECK lines autogenerated by
// update_cc_test_checks.py.
// ---------------------------------------------------------------------------
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tu(
// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tu(
    vfloat8e5m2mf8_t vd, vbfloat16mf4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tu(
// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tu(
    vfloat8e5m2mf8_t vd, vbfloat16mf4_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tu(
// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tu(
    vfloat8e5m2mf4_t vd, vbfloat16mf2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tu(
// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tu(
    vfloat8e5m2mf4_t vd, vbfloat16mf2_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tu(
// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tu(
    vfloat8e5m2mf2_t vd, vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tu(
// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tu(
    vfloat8e5m2mf2_t vd, vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e5m2m1_tu(
// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_tu(
    vfloat8e5m2m1_t vd, vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tu(
// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tu(
    vfloat8e5m2m1_t vd, vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e5m2m2_tu(
// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_tu(
    vfloat8e5m2m2_t vd, vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tu(
// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tu(
    vfloat8e5m2m2_t vd, vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e5m2m4_tu(
// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_tu(
    vfloat8e5m2m4_t vd, vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_tu(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tu(
// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tu(
    vfloat8e5m2m4_t vd, vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tu(vd, vs2, vl);
}
|
|
|
|
// ---------------------------------------------------------------------------
// Masked bf16 -> FP8(E5M2) narrowing conversions (plain and saturating),
// tail-undisturbed (_tum) policy, for LMUL mf8 through m4 destinations.
// E5M2 lowers to the masked ".alt.mask" intrinsic variants with frm 7
// (dynamic) and policy operand 2.  CHECK lines autogenerated by
// update_cc_test_checks.py.
// ---------------------------------------------------------------------------
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tum(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tum(
    vbool64_t vm, vfloat8e5m2mf8_t vd, vbfloat16mf4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tum(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tum(
    vbool64_t vm, vfloat8e5m2mf8_t vd, vbfloat16mf4_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tum(
    vbool32_t vm, vfloat8e5m2mf4_t vd, vbfloat16mf2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tum(
    vbool32_t vm, vfloat8e5m2mf4_t vd, vbfloat16mf2_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tum(
    vbool16_t vm, vfloat8e5m2mf2_t vd, vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tum(
    vbool16_t vm, vfloat8e5m2mf2_t vd, vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e5m2m1_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_tum(
    vbool8_t vm, vfloat8e5m2m1_t vd, vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tum(
    vbool8_t vm, vfloat8e5m2m1_t vd, vbfloat16m2_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e5m2m2_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_tum(
    vbool4_t vm, vfloat8e5m2m2_t vd, vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tum(
    vbool4_t vm, vfloat8e5m2m2_t vd, vbfloat16m4_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e5m2m4_tum(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_tum(
    vbool2_t vm, vfloat8e5m2m4_t vd, vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_tum(vm, vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tum(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tum(
    vbool2_t vm, vfloat8e5m2m4_t vd, vbfloat16m8_t vs2, size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tum(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tumu(vbool64_t vm,
|
|
vfloat8e5m2mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tumu(vbool64_t vm,
|
|
vfloat8e5m2mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tumu(vbool32_t vm,
|
|
vfloat8e5m2mf4_t vd,
|
|
vbfloat16mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tumu(vbool32_t vm,
|
|
vfloat8e5m2mf4_t vd,
|
|
vbfloat16mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tumu(vbool16_t vm,
|
|
vfloat8e5m2mf2_t vd,
|
|
vbfloat16m1_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tumu(vbool16_t vm,
|
|
vfloat8e5m2mf2_t vd,
|
|
vbfloat16m1_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e5m2m1_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_tumu(vbool8_t vm, vfloat8e5m2m1_t vd,
|
|
vbfloat16m2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tumu(vbool8_t vm,
|
|
vfloat8e5m2m1_t vd,
|
|
vbfloat16m2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e5m2m2_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_tumu(vbool4_t vm, vfloat8e5m2m2_t vd,
|
|
vbfloat16m4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tumu(vbool4_t vm,
|
|
vfloat8e5m2m2_t vd,
|
|
vbfloat16m4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e5m2m4_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_tumu(vbool2_t vm, vfloat8e5m2m4_t vd,
|
|
vbfloat16m8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tumu(vbool2_t vm,
|
|
vfloat8e5m2m4_t vd,
|
|
vbfloat16m8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_mu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_mu(vbool64_t vm, vfloat8e5m2mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_mu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_mu(vbool64_t vm,
|
|
vfloat8e5m2mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_mu(vbool32_t vm, vfloat8e5m2mf4_t vd,
|
|
vbfloat16mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_mu(vbool32_t vm,
|
|
vfloat8e5m2mf4_t vd,
|
|
vbfloat16mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_mu(vbool16_t vm, vfloat8e5m2mf2_t vd,
|
|
vbfloat16m1_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_mu(vbool16_t vm,
|
|
vfloat8e5m2mf2_t vd,
|
|
vbfloat16m1_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e5m2m1_mu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_mu(vbool8_t vm, vfloat8e5m2m1_t vd,
|
|
vbfloat16m2_t vs2, size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_mu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_mu(vbool8_t vm, vfloat8e5m2m1_t vd,
|
|
vbfloat16m2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e5m2m2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_mu(vbool4_t vm, vfloat8e5m2m2_t vd,
|
|
vbfloat16m4_t vs2, size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_mu(vbool4_t vm, vfloat8e5m2m2_t vd,
|
|
vbfloat16m4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e5m2m4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_mu(vbool2_t vm, vfloat8e5m2m4_t vd,
|
|
vbfloat16m8_t vs2, size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_mu(vbool2_t vm, vfloat8e5m2m4_t vd,
|
|
vbfloat16m8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tu(vfloat8e5m2mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE,
|
|
vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tu(vfloat8e5m2mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tu(vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tu(vfloat8e5m2mf4_t vd,
|
|
vbfloat16mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE,
|
|
vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tu(vfloat8e5m2mf4_t vd,
|
|
vbfloat16mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tu(vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tu(vfloat8e5m2mf2_t vd,
|
|
vbfloat16m1_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE,
|
|
vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tu(vfloat8e5m2mf2_t vd,
|
|
vbfloat16m1_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tu(vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tu(vfloat8e5m2m1_t vd,
|
|
vbfloat16m2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tu(vd, vs2, __RISCV_FRM_RNE,
|
|
vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tu(vfloat8e5m2m1_t vd,
|
|
vbfloat16m2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tu(vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tu(vfloat8e5m2m2_t vd,
|
|
vbfloat16m4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tu(vd, vs2, __RISCV_FRM_RNE,
|
|
vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tu(vfloat8e5m2m2_t vd,
|
|
vbfloat16m4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tu(vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tu(vfloat8e5m2m4_t vd,
|
|
vbfloat16m8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tu(vd, vs2, __RISCV_FRM_RNE,
|
|
vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tu(vfloat8e5m2m4_t vd,
|
|
vbfloat16m8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tu(vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tum(vbool64_t vm,
|
|
vfloat8e5m2mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tum(vm, vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tum(vbool64_t vm,
|
|
vfloat8e5m2mf8_t vd,
|
|
vbfloat16mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tum(vm, vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tum(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
|
//
|
|
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tum(vbool32_t vm,
|
|
vfloat8e5m2mf4_t vd,
|
|
vbfloat16mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tum(vm, vd, vs2,
|
|
__RISCV_FRM_RNE, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
// Saturating bf16mf2 -> E5M2 (mf4) convert; explicit RNE frm, _tum policy.
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tum(vbool32_t vm,
                                                                vfloat8e5m2mf4_t vd,
                                                                vbfloat16mf2_t vs2,
                                                                size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tum(vm, vd, vs2,
                                                           __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
// bf16m1 -> OFP8 E5M2 (mf2) narrowing convert; explicit RNE frm, _tum policy.
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tum(vbool16_t vm,
                                                           vfloat8e5m2mf2_t vd,
                                                           vbfloat16m1_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tum(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
// Saturating bf16m1 -> E5M2 (mf2) convert; explicit RNE frm, _tum policy.
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tum(vbool16_t vm,
                                                               vfloat8e5m2mf2_t vd,
                                                               vbfloat16m1_t vs2,
                                                               size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tum(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
// bf16m2 -> OFP8 E5M2 (m1) narrowing convert; explicit RNE frm, _tum policy.
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tum(vbool8_t vm, vfloat8e5m2m1_t vd,
                                                         vbfloat16m2_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tum(vm, vd, vs2,
                                                     __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
// Saturating bf16m2 -> E5M2 (m1) convert; explicit RNE frm, _tum policy.
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tum(vbool8_t vm,
                                                             vfloat8e5m2m1_t vd,
                                                             vbfloat16m2_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tum(vm, vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// bf16m4 -> OFP8 E5M2 (m2) narrowing convert; explicit RNE frm, _tum policy.
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tum(vbool4_t vm, vfloat8e5m2m2_t vd,
                                                         vbfloat16m4_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tum(vm, vd, vs2,
                                                     __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// Saturating bf16m4 -> E5M2 (m2) convert; explicit RNE frm, _tum policy.
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tum(vbool4_t vm,
                                                             vfloat8e5m2m2_t vd,
                                                             vbfloat16m4_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tum(vm, vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tum(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
// bf16m8 -> OFP8 E5M2 (m4) narrowing convert; explicit RNE frm, _tum policy.
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tum(vbool2_t vm, vfloat8e5m2m4_t vd,
                                                         vbfloat16m8_t vs2,
                                                         size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tum(vm, vd, vs2,
                                                     __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tum(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
// Saturating bf16m8 -> E5M2 (m4) convert; explicit RNE frm, _tum policy.
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tum(vbool2_t vm,
                                                             vfloat8e5m2m4_t vd,
                                                             vbfloat16m8_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tum(vm, vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tumu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
// bf16mf4 -> OFP8 E5M2 (mf8) convert; explicit RNE frm, _tumu policy (i64 0).
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tumu(vbool64_t vm,
                                                             vfloat8e5m2mf8_t vd,
                                                             vbfloat16mf4_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_tumu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tumu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
// Saturating bf16mf4 -> E5M2 (mf8) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tumu(vbool64_t vm,
                                                                 vfloat8e5m2mf8_t vd,
                                                                 vbfloat16mf4_t vs2,
                                                                 size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_tumu(
      vm, vd, vs2, __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tumu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
// bf16mf2 -> OFP8 E5M2 (mf4) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tumu(vbool32_t vm,
                                                             vfloat8e5m2mf4_t vd,
                                                             vbfloat16mf2_t vs2,
                                                             size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_tumu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tumu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
// Saturating bf16mf2 -> E5M2 (mf4) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tumu(vbool32_t vm,
                                                                 vfloat8e5m2mf4_t vd,
                                                                 vbfloat16mf2_t vs2,
                                                                 size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_tumu(
      vm, vd, vs2, __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tumu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
// bf16m1 -> OFP8 E5M2 (mf2) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tumu(vbool16_t vm,
                                                            vfloat8e5m2mf2_t vd,
                                                            vbfloat16m1_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_tumu(vm, vd, vs2,
                                                       __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tumu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
// Saturating bf16m1 -> E5M2 (mf2) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tumu(vbool16_t vm,
                                                                vfloat8e5m2mf2_t vd,
                                                                vbfloat16m1_t vs2,
                                                                size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_tumu(vm, vd, vs2,
                                                           __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
// bf16m2 -> OFP8 E5M2 (m1) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tumu(vbool8_t vm, vfloat8e5m2m1_t vd,
                                                          vbfloat16m2_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_tumu(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
// Saturating bf16m2 -> E5M2 (m1) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tumu(vbool8_t vm,
                                                              vfloat8e5m2m1_t vd,
                                                              vbfloat16m2_t vs2,
                                                              size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_tumu(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tumu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// bf16m4 -> OFP8 E5M2 (m2) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tumu(vbool4_t vm, vfloat8e5m2m2_t vd,
                                                          vbfloat16m4_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_tumu(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tumu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// Saturating bf16m4 -> E5M2 (m2) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tumu(vbool4_t vm,
                                                              vfloat8e5m2m2_t vd,
                                                              vbfloat16m4_t vs2,
                                                              size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_tumu(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tumu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
// bf16m8 -> OFP8 E5M2 (m4) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tumu(vbool2_t vm, vfloat8e5m2m4_t vd,
                                                          vbfloat16m8_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_tumu(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tumu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
// Saturating bf16m8 -> E5M2 (m4) convert; explicit RNE frm, _tumu policy.
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tumu(vbool2_t vm,
                                                              vfloat8e5m2m4_t vd,
                                                              vbfloat16m8_t vs2,
                                                              size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_tumu(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
// bf16mf4 -> OFP8 E5M2 (mf8) convert; explicit RNE frm, _mu policy (i64 1).
vfloat8e5m2mf8_t test_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_mu(vbool64_t vm,
                                                           vfloat8e5m2mf8_t vd,
                                                           vbfloat16mf4_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf4_f8e5m2mf8_rm_mu(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
// Saturating bf16mf4 -> E5M2 (mf8) convert; explicit RNE frm, _mu policy.
vfloat8e5m2mf8_t test_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_mu(vbool64_t vm,
                                                               vfloat8e5m2mf8_t vd,
                                                               vbfloat16mf4_t vs2,
                                                               size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf4_f8e5m2mf8_rm_mu(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
// bf16mf2 -> OFP8 E5M2 (mf4) convert; explicit RNE frm, _mu policy.
vfloat8e5m2mf4_t test_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_mu(vbool32_t vm,
                                                           vfloat8e5m2mf4_t vd,
                                                           vbfloat16mf2_t vs2,
                                                           size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16mf2_f8e5m2mf4_rm_mu(vm, vd, vs2,
                                                      __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
// Saturating bf16mf2 -> E5M2 (mf4) convert; explicit RNE frm, _mu policy.
vfloat8e5m2mf4_t test_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_mu(vbool32_t vm,
                                                               vfloat8e5m2mf4_t vd,
                                                               vbfloat16mf2_t vs2,
                                                               size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16mf2_f8e5m2mf4_rm_mu(vm, vd, vs2,
                                                          __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
// bf16m1 -> OFP8 E5M2 (mf2) convert; explicit RNE frm, _mu policy.
vfloat8e5m2mf2_t test_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_mu(vbool16_t vm,
                                                          vfloat8e5m2mf2_t vd,
                                                          vbfloat16m1_t vs2,
                                                          size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m1_f8e5m2mf2_rm_mu(vm, vd, vs2,
                                                     __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
// Saturating bf16m1 -> E5M2 (mf2) convert; explicit RNE frm, _mu policy.
vfloat8e5m2mf2_t test_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_mu(vbool16_t vm,
                                                              vfloat8e5m2mf2_t vd,
                                                              vbfloat16m1_t vs2,
                                                              size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m1_f8e5m2mf2_rm_mu(vm, vd, vs2,
                                                         __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
// bf16m2 -> OFP8 E5M2 (m1) convert; explicit RNE frm, _mu policy.
vfloat8e5m2m1_t test_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_mu(vbool8_t vm, vfloat8e5m2m1_t vd,
                                                        vbfloat16m2_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m2_f8e5m2m1_rm_mu(vm, vd, vs2,
                                                    __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
// Saturating bf16m2 -> E5M2 (m1) convert; explicit RNE frm, _mu policy.
vfloat8e5m2m1_t test_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_mu(vbool8_t vm,
                                                            vfloat8e5m2m1_t vd,
                                                            vbfloat16m2_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m2_f8e5m2m1_rm_mu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_mu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// bf16m4 -> OFP8 E5M2 (m2) convert; explicit RNE frm, _mu policy.
vfloat8e5m2m2_t test_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_mu(vbool4_t vm, vfloat8e5m2m2_t vd,
                                                        vbfloat16m4_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m4_f8e5m2m2_rm_mu(vm, vd, vs2,
                                                    __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_mu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// Saturating bf16m4 -> E5M2 (m2) convert; explicit RNE frm, _mu policy.
vfloat8e5m2m2_t test_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_mu(vbool4_t vm,
                                                            vfloat8e5m2m2_t vd,
                                                            vbfloat16m4_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m4_f8e5m2m2_rm_mu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_mu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
// bf16m8 -> OFP8 E5M2 (m4) convert; explicit RNE frm, _mu policy.
vfloat8e5m2m4_t test_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_mu(vbool2_t vm, vfloat8e5m2m4_t vd,
                                                        vbfloat16m8_t vs2,
                                                        size_t vl) {
  return __riscv_vfncvt_f_f_w_bf16m8_f8e5m2m4_rm_mu(vm, vd, vs2,
                                                    __RISCV_FRM_RNE, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_mu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
// Saturating bf16m8 -> E5M2 (m4) convert; explicit RNE frm, _mu policy.
vfloat8e5m2m4_t test_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_mu(vbool2_t vm,
                                                            vfloat8e5m2m4_t vd,
                                                            vbfloat16m8_t vs2,
                                                            size_t vl) {
  return __riscv_vfncvt_sat_f_f_w_bf16m8_f8e5m2m4_rm_mu(vm, vd, vs2,
                                                        __RISCV_FRM_RNE, vl);
}
|