Currently there is no OFP8 scalar type in either the Clang or the LLVM type system, so the OFP8 RVV vector types are lowered to i8 LLVM vector types for now. Only a Clang-level type is added because of how the intrinsics are defined: if the Clang types were also plain uint8 vector types, E4M3 and E5M2 operands could not be distinguished, which would force an additional type suffix onto every intrinsic name. Intrinsic spec update PR: https://github.com/riscv-non-isa/riscv-rvv-intrinsic-doc/pull/432. vreinterpret intrinsic PR: https://github.com/llvm/llvm-project/pull/191626. DO NOT MERGE: the intrinsic spec must be merged before the zvfofp8min change can land.
603 lines
41 KiB
C
603 lines
41 KiB
C
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvfbfmin \
// RUN:   -target-feature +experimental-zvfofp8min -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tu(vbfloat16mf4_t vd,
|
|
vfloat8e4m3mf8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(vbfloat16mf2_t vd,
|
|
vfloat8e4m3mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(vbfloat16m1_t vd,
|
|
vfloat8e4m3mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(vbfloat16m2_t vd,
|
|
vfloat8e4m3m1_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(vbfloat16m4_t vd,
|
|
vfloat8e4m3m2_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(vbfloat16m8_t vd,
|
|
vfloat8e4m3m4_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(vbool64_t vm,
|
|
vbfloat16mf4_t vd,
|
|
vfloat8e4m3mf8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(vbool32_t vm,
|
|
vbfloat16mf2_t vd,
|
|
vfloat8e4m3mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(vbool16_t vm,
|
|
vbfloat16m1_t vd,
|
|
vfloat8e4m3mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(vbool8_t vm,
|
|
vbfloat16m2_t vd,
|
|
vfloat8e4m3m1_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(vbool4_t vm,
|
|
vbfloat16m4_t vd,
|
|
vfloat8e4m3m2_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(vbool2_t vm,
|
|
vbfloat16m8_t vd,
|
|
vfloat8e4m3m4_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(vbool64_t vm,
|
|
vbfloat16mf4_t vd,
|
|
vfloat8e4m3mf8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(vbool32_t vm,
|
|
vbfloat16mf2_t vd,
|
|
vfloat8e4m3mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(vbool16_t vm,
|
|
vbfloat16m1_t vd,
|
|
vfloat8e4m3mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(vbool8_t vm,
|
|
vbfloat16m2_t vd,
|
|
vfloat8e4m3m1_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(vbool4_t vm,
|
|
vbfloat16m4_t vd,
|
|
vfloat8e4m3m2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(vbool2_t vm,
|
|
vbfloat16m8_t vd,
|
|
vfloat8e4m3m4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(vbool64_t vm,
|
|
vbfloat16mf4_t vd,
|
|
vfloat8e4m3mf8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf8_bf16mf4_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(vbool32_t vm,
|
|
vbfloat16mf2_t vd,
|
|
vfloat8e4m3mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf4_bf16mf2_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m1_t test_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(vbool16_t vm,
|
|
vbfloat16m1_t vd,
|
|
vfloat8e4m3mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3mf2_bf16m1_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m2_t test_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(vbool8_t vm,
|
|
vbfloat16m2_t vd,
|
|
vfloat8e4m3m1_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m1_bf16m2_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m4_t test_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(vbool4_t vm,
|
|
vbfloat16m4_t vd,
|
|
vfloat8e4m3m2_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m2_bf16m4_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m8_t test_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(vbool2_t vm,
|
|
vbfloat16m8_t vd,
|
|
vfloat8e4m3m4_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e4m3m4_bf16m8_mu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(vbfloat16mf4_t vd,
|
|
vfloat8e5m2mf8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(vbfloat16mf2_t vd,
|
|
vfloat8e5m2mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(vbfloat16m1_t vd,
|
|
vfloat8e5m2mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(
|
|
// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(vbfloat16m2_t vd,
|
|
vfloat8e5m2m1_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(
|
|
// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(vbfloat16m4_t vd,
|
|
vfloat8e5m2m2_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(
|
|
// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(vbfloat16m8_t vd,
|
|
vfloat8e5m2m4_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tu(vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(vbool64_t vm,
|
|
vbfloat16mf4_t vd,
|
|
vfloat8e5m2mf8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(vbool32_t vm,
|
|
vbfloat16mf2_t vd,
|
|
vfloat8e5m2mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(vbool16_t vm,
|
|
vbfloat16m1_t vd,
|
|
vfloat8e5m2mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(
|
|
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(vbool8_t vm,
|
|
vbfloat16m2_t vd,
|
|
vfloat8e5m2m1_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(
|
|
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(vbool4_t vm,
|
|
vbfloat16m4_t vd,
|
|
vfloat8e5m2m2_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(
|
|
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
|
|
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(vbool2_t vm,
|
|
vbfloat16m8_t vd,
|
|
vfloat8e5m2m4_t vs2, size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tum(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(vbool64_t vm,
|
|
vbfloat16mf4_t vd,
|
|
vfloat8e5m2mf8_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(vbool32_t vm,
|
|
vbfloat16mf2_t vd,
|
|
vfloat8e5m2mf4_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(
|
|
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
|
|
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
|
|
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
|
|
//
|
|
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(vbool16_t vm,
|
|
vbfloat16m1_t vd,
|
|
vfloat8e5m2mf2_t vs2,
|
|
size_t vl) {
|
|
return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_tumu(vm, vd, vs2, vl);
|
|
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
// _tumu variant: masked widening convert, fp8 E5M2 (m1, lowered to i8
// elements for now) -> bf16 (m2); policy operand `i64 0` = TUMU.
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(vbool8_t vm,
                                                     vbfloat16m2_t vd,
                                                     vfloat8e5m2m1_t vs2,
                                                     size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_tumu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
// _tumu variant: masked widening convert, fp8 E5M2 (m2, lowered to i8
// elements for now) -> bf16 (m4); policy operand `i64 0` = TUMU.
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(vbool4_t vm,
                                                     vbfloat16m4_t vd,
                                                     vfloat8e5m2m2_t vs2,
                                                     size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_tumu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
// _tumu variant: masked widening convert, fp8 E5M2 (m4, lowered to i8
// elements for now) -> bf16 (m8); policy operand `i64 0` = TUMU.
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(vbool2_t vm,
                                                     vbfloat16m8_t vd,
                                                     vfloat8e5m2m4_t vs2,
                                                     size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_tumu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
//
// _mu variant: masked widening convert, fp8 E5M2 (mf8, lowered to i8 elements
// for now) -> bf16 (mf4). The trailing `i64 1` policy operand in the checked
// intrinsic call encodes the mask-undisturbed (tail-agnostic) policy.
vbfloat16mf4_t test_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(vbool64_t vm,
                                                      vbfloat16mf4_t vd,
                                                      vfloat8e5m2mf8_t vs2,
                                                      size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2mf8_bf16mf4_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
//
// _mu variant: masked widening convert, fp8 E5M2 (mf4, lowered to i8
// elements for now) -> bf16 (mf2); policy operand `i64 1` = mask-undisturbed.
vbfloat16mf2_t test_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(vbool32_t vm,
                                                      vbfloat16mf2_t vd,
                                                      vfloat8e5m2mf4_t vs2,
                                                      size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2mf4_bf16mf2_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
//
// _mu variant: masked widening convert, fp8 E5M2 (mf2, lowered to i8
// elements for now) -> bf16 (m1); policy operand `i64 1` = mask-undisturbed.
vbfloat16m1_t test_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(vbool16_t vm,
                                                    vbfloat16m1_t vd,
                                                    vfloat8e5m2mf2_t vs2,
                                                    size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2mf2_bf16m1_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
//
// _mu variant: masked widening convert, fp8 E5M2 (m1, lowered to i8
// elements for now) -> bf16 (m2); policy operand `i64 1` = mask-undisturbed.
vbfloat16m2_t test_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(vbool8_t vm,
                                                   vbfloat16m2_t vd,
                                                   vfloat8e5m2m1_t vs2, size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2m1_bf16m2_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
//
// _mu variant: masked widening convert, fp8 E5M2 (m2, lowered to i8
// elements for now) -> bf16 (m4); policy operand `i64 1` = mask-undisturbed.
vbfloat16m4_t test_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(vbool4_t vm,
                                                   vbfloat16m4_t vd,
                                                   vfloat8e5m2m2_t vs2, size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2m2_bf16m4_mu(vm, vd, vs2, vl);
}
|
|
|
|
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_mu(
// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: [[ENTRY:.*:]]
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
// _mu variant: masked widening convert, fp8 E5M2 (m4, lowered to i8
// elements for now) -> bf16 (m8); policy operand `i64 1` = mask-undisturbed.
vbfloat16m8_t test_vfwcvt_f_f_v_f8e5m2m4_bf16m8_mu(vbool2_t vm,
                                                   vbfloat16m8_t vd,
                                                   vfloat8e5m2m4_t vs2, size_t vl) {
  return __riscv_vfwcvt_f_f_v_f8e5m2m4_bf16m8_mu(vm, vd, vs2, vl);
}
|