1215 lines
44 KiB
C++
1215 lines
44 KiB
C++
//===-- Intrinsics.cpp - Intrinsic Function Handling ------------*- C++ -*-===//
|
|
//
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
//
|
|
// This file implements functions required for supporting intrinsic functions.
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
|
#include "llvm/ADT/StringExtras.h"
|
|
#include "llvm/ADT/StringTable.h"
|
|
#include "llvm/IR/ConstantRange.h"
|
|
#include "llvm/IR/Function.h"
|
|
#include "llvm/IR/IntrinsicsAArch64.h"
|
|
#include "llvm/IR/IntrinsicsAMDGPU.h"
|
|
#include "llvm/IR/IntrinsicsARM.h"
|
|
#include "llvm/IR/IntrinsicsBPF.h"
|
|
#include "llvm/IR/IntrinsicsHexagon.h"
|
|
#include "llvm/IR/IntrinsicsLoongArch.h"
|
|
#include "llvm/IR/IntrinsicsMips.h"
|
|
#include "llvm/IR/IntrinsicsNVPTX.h"
|
|
#include "llvm/IR/IntrinsicsPowerPC.h"
|
|
#include "llvm/IR/IntrinsicsR600.h"
|
|
#include "llvm/IR/IntrinsicsRISCV.h"
|
|
#include "llvm/IR/IntrinsicsS390.h"
|
|
#include "llvm/IR/IntrinsicsSPIRV.h"
|
|
#include "llvm/IR/IntrinsicsVE.h"
|
|
#include "llvm/IR/IntrinsicsX86.h"
|
|
#include "llvm/IR/IntrinsicsXCore.h"
|
|
#include "llvm/IR/Module.h"
|
|
#include "llvm/IR/NVVMIntrinsicUtils.h"
|
|
#include "llvm/IR/Type.h"
|
|
|
|
using namespace llvm;
|
|
|
|
/// Table of string intrinsic names indexed by enum value.
|
|
#define GET_INTRINSIC_NAME_TABLE
|
|
#include "llvm/IR/IntrinsicImpl.inc"
|
|
|
|
/// Return the unmangled (base) name for intrinsic \p id by looking it up in
/// the TableGen-generated name tables.
StringRef Intrinsic::getBaseName(ID id) {
  assert(id < num_intrinsics && "Invalid intrinsic ID!");
  // The offset table maps the intrinsic ID to this intrinsic's slot in the
  // shared string table of all intrinsic names.
  unsigned NameOffset = IntrinsicNameOffsetTable[id];
  return IntrinsicNameTable[NameOffset];
}
|
|
|
|
/// Return the name of a non-overloaded intrinsic. Overloaded intrinsics must
/// use the mangling overload of getName instead.
StringRef Intrinsic::getName(ID id) {
  assert(id < num_intrinsics && "Invalid intrinsic ID!");
  assert(!Intrinsic::isOverloaded(id) &&
         "This version of getName does not support overloading");
  // Without overload types to mangle, the base name is the complete name.
  return getBaseName(id);
}
|
|
|
|
/// Returns a stable mangling for the type specified for use in the name
|
|
/// mangling scheme used by 'any' types in intrinsic signatures. The mangling
|
|
/// of named types is simply their name. Manglings for unnamed types consist
|
|
/// of a prefix ('p' for pointers, 'a' for arrays, 'f_' for functions)
|
|
/// combined with the mangling of their component types. A vararg function
|
|
/// type will have a suffix of 'vararg'. Since function types can contain
|
|
/// other function types, we close a function type mangling with suffix 'f'
|
|
/// which can't be confused with it's prefix. This ensures we don't have
|
|
/// collisions between two unrelated function types. Otherwise, you might
|
|
/// parse ffXX as f(fXX) or f(fX)X. (X is a placeholder for any other type.)
|
|
/// The HasUnnamedType boolean is set if an unnamed type was encountered,
|
|
/// indicating that extra care must be taken to ensure a unique name.
|
|
static std::string getMangledTypeStr(Type *Ty, bool &HasUnnamedType) {
  std::string Result;
  if (PointerType *PTyp = dyn_cast<PointerType>(Ty)) {
    // Pointers: 'p' followed by the address space number, e.g. "p0".
    Result += "p" + utostr(PTyp->getAddressSpace());
  } else if (ArrayType *ATyp = dyn_cast<ArrayType>(Ty)) {
    // Arrays: 'a' + element count + mangled element type.
    Result += "a" + utostr(ATyp->getNumElements()) +
              getMangledTypeStr(ATyp->getElementType(), HasUnnamedType);
  } else if (StructType *STyp = dyn_cast<StructType>(Ty)) {
    if (!STyp->isLiteral()) {
      // Identified structs mangle by name; an anonymous identified struct
      // cannot, so flag it and let the caller uniquify the final name.
      Result += "s_";
      if (STyp->hasName())
        Result += STyp->getName();
      else
        HasUnnamedType = true;
    } else {
      // Literal structs mangle structurally from their element types.
      Result += "sl_";
      for (auto *Elem : STyp->elements())
        Result += getMangledTypeStr(Elem, HasUnnamedType);
    }
    // Ensure nested structs are distinguishable.
    Result += "s";
  } else if (FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
    // Functions: "f_" + return type + parameter types; the closing 'f'
    // prevents ambiguity with nested function types (see comment above).
    Result += "f_" + getMangledTypeStr(FT->getReturnType(), HasUnnamedType);
    for (size_t i = 0; i < FT->getNumParams(); i++)
      Result += getMangledTypeStr(FT->getParamType(i), HasUnnamedType);
    if (FT->isVarArg())
      Result += "vararg";
    // Ensure nested function types are distinguishable.
    Result += "f";
  } else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    // Vectors: optional "nx" for scalable vectors, then 'v' + (minimum known)
    // element count + mangled element type, e.g. "nxv4i32".
    ElementCount EC = VTy->getElementCount();
    if (EC.isScalable())
      Result += "nx";
    Result += "v" + utostr(EC.getKnownMinValue()) +
              getMangledTypeStr(VTy->getElementType(), HasUnnamedType);
  } else if (TargetExtType *TETy = dyn_cast<TargetExtType>(Ty)) {
    // Target extension types: 't' + name + "_"-separated type parameters and
    // integer parameters.
    Result += "t";
    Result += TETy->getName();
    for (Type *ParamTy : TETy->type_params())
      Result += "_" + getMangledTypeStr(ParamTy, HasUnnamedType);
    for (unsigned IntParam : TETy->int_params())
      Result += "_" + utostr(IntParam);
    // Ensure nested target extension types are distinguishable.
    Result += "t";
  } else if (Ty) {
    // Primitive and first-class scalar types have fixed manglings.
    switch (Ty->getTypeID()) {
    default:
      llvm_unreachable("Unhandled type");
    case Type::VoidTyID:
      Result += "isVoid";
      break;
    case Type::MetadataTyID:
      Result += "Metadata";
      break;
    case Type::HalfTyID:
      Result += "f16";
      break;
    case Type::BFloatTyID:
      Result += "bf16";
      break;
    case Type::FloatTyID:
      Result += "f32";
      break;
    case Type::DoubleTyID:
      Result += "f64";
      break;
    case Type::X86_FP80TyID:
      Result += "f80";
      break;
    case Type::FP128TyID:
      Result += "f128";
      break;
    case Type::PPC_FP128TyID:
      Result += "ppcf128";
      break;
    case Type::X86_AMXTyID:
      Result += "x86amx";
      break;
    case Type::IntegerTyID:
      Result += "i" + utostr(cast<IntegerType>(Ty)->getBitWidth());
      break;
    case Type::ByteTyID:
      // Byte types mangle like integers but with a 'b' prefix.
      Result += "b" + utostr(cast<ByteType>(Ty)->getBitWidth());
      break;
    }
  }
  return Result;
}
|
|
|
|
/// Build the (possibly mangled) name for intrinsic \p Id with the given
/// overload types. When the mangling involves an unnamed struct type, the
/// module is consulted to produce a name unique to (Id, FT).
static std::string getIntrinsicNameImpl(Intrinsic::ID Id,
                                        ArrayRef<Type *> OverloadTys, Module *M,
                                        FunctionType *FT,
                                        bool EarlyModuleCheck) {

  assert(Id < Intrinsic::num_intrinsics && "Invalid intrinsic ID!");
  assert((OverloadTys.empty() || Intrinsic::isOverloaded(Id)) &&
         "This version of getName is for overloaded intrinsics only");
  (void)EarlyModuleCheck;
  assert((!EarlyModuleCheck || M ||
          !any_of(OverloadTys, llvm::IsaPred<PointerType>)) &&
         "Intrinsic overloading on pointer types need to provide a Module");

  // Start from the base name and append one "."-separated mangled component
  // per overload type, noting whether any component hit an unnamed type.
  bool SawUnnamedType = false;
  std::string Name(Intrinsic::getBaseName(Id));
  for (Type *OverloadTy : OverloadTys)
    Name += "." + getMangledTypeStr(OverloadTy, SawUnnamedType);

  if (!SawUnnamedType)
    return Name;

  // Unnamed types do not mangle uniquely; defer to the module to pick a name
  // that is unique for this (Id, FunctionType) pair.
  assert(M && "unnamed types need a module");
  if (!FT)
    FT = Intrinsic::getType(M->getContext(), Id, OverloadTys);
  else
    assert(FT == Intrinsic::getType(M->getContext(), Id, OverloadTys) &&
           "Provided FunctionType must match arguments");
  return M->getUniqueIntrinsicName(Name, Id, FT);
}
|
|
|
|
/// Return the mangled name for overloaded intrinsic \p Id; \p M is required
/// so unnamed types can be mangled uniquely.
std::string Intrinsic::getName(ID Id, ArrayRef<Type *> OverloadTys, Module *M,
                               FunctionType *FT) {
  assert(M && "We need to have a Module");
  return getIntrinsicNameImpl(Id, OverloadTys, M, FT,
                              /*EarlyModuleCheck=*/true);
}
|
|
|
|
/// Like getName, but without a module: valid only when the overload types do
/// not involve unnamed types (asserted inside the implementation).
std::string Intrinsic::getNameNoUnnamedTypes(ID Id,
                                             ArrayRef<Type *> OverloadTys) {
  return getIntrinsicNameImpl(Id, OverloadTys, /*M=*/nullptr, /*FT=*/nullptr,
                              /*EarlyModuleCheck=*/false);
}
|
|
|
|
/// IIT_Info - These are enumerators that describe the entries returned by the
|
|
/// getIntrinsicInfoTableEntries function.
|
|
///
|
|
/// Defined in Intrinsics.td.
|
|
enum IIT_Info {
|
|
#define GET_INTRINSIC_IITINFO
|
|
#include "llvm/IR/IntrinsicImpl.inc"
|
|
};
|
|
|
|
static_assert(IIT_Done == 0, "IIT_Done expected to be 0");
|
|
|
|
/// Decode one type descriptor from the encoded byte stream \p Infos, starting
/// at index \p NextElt, appending the resulting IITDescriptor(s) to
/// \p OutputTable. Vector and struct tokens recurse to decode their element
/// types; several tokens consume additional payload bytes from the stream.
static void
DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
              SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
  using namespace Intrinsic;

  // Peek for an IIT_SCALABLE_VEC prefix; if present, consume it and mark the
  // vector token being decoded as scalable.
  auto IsScalableVector = [&]() {
    IIT_Info NextInfo = IIT_Info(Infos[NextElt]);
    if (NextInfo != IIT_SCALABLE_VEC)
      return false;
    // Eat the IIT_SCALABLE_VEC token.
    ++NextElt;
    return true;
  };

  IIT_Info Info = IIT_Info(Infos[NextElt++]);

  switch (Info) {
  case IIT_Done:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Void, 0));
    return;
  case IIT_VARARG:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::VarArg, 0));
    return;
  case IIT_MMX:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::MMX, 0));
    return;
  case IIT_AMX:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::AMX, 0));
    return;
  case IIT_TOKEN:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Token, 0));
    return;
  case IIT_METADATA:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Metadata, 0));
    return;
  case IIT_F16:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Half, 0));
    return;
  case IIT_BF16:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::BFloat, 0));
    return;
  case IIT_F32:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Float, 0));
    return;
  case IIT_F64:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Double, 0));
    return;
  case IIT_F128:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Quad, 0));
    return;
  case IIT_PPCF128:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::PPCQuad, 0));
    return;
  // Integer tokens carry their bit width in the descriptor.
  case IIT_I1:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 1));
    return;
  case IIT_I2:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 2));
    return;
  case IIT_I4:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 4));
    return;
  case IIT_AARCH64_SVCOUNT:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::AArch64Svcount, 0));
    return;
  case IIT_I8:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
    return;
  case IIT_I16:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 16));
    return;
  case IIT_I32:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 32));
    return;
  case IIT_I64:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 64));
    return;
  case IIT_I128:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 128));
    return;
  // Vector tokens: emit a vector descriptor of the given element count (a
  // preceding IIT_SCALABLE_VEC makes it scalable), then recurse to decode the
  // element type that follows in the stream.
  case IIT_V1:
    OutputTable.push_back(IITDescriptor::getVector(1, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V2:
    OutputTable.push_back(IITDescriptor::getVector(2, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V3:
    OutputTable.push_back(IITDescriptor::getVector(3, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V4:
    OutputTable.push_back(IITDescriptor::getVector(4, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V6:
    OutputTable.push_back(IITDescriptor::getVector(6, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V8:
    OutputTable.push_back(IITDescriptor::getVector(8, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V10:
    OutputTable.push_back(IITDescriptor::getVector(10, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V16:
    OutputTable.push_back(IITDescriptor::getVector(16, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V32:
    OutputTable.push_back(IITDescriptor::getVector(32, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V64:
    OutputTable.push_back(IITDescriptor::getVector(64, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V128:
    OutputTable.push_back(IITDescriptor::getVector(128, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V256:
    OutputTable.push_back(IITDescriptor::getVector(256, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V512:
    OutputTable.push_back(IITDescriptor::getVector(512, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V1024:
    OutputTable.push_back(IITDescriptor::getVector(1024, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V2048:
    OutputTable.push_back(IITDescriptor::getVector(2048, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  case IIT_V4096:
    OutputTable.push_back(IITDescriptor::getVector(4096, IsScalableVector()));
    DecodeIITType(NextElt, Infos, OutputTable);
    return;
  // Reference types are represented as pointers in dedicated address spaces
  // (10 and 20 — presumably the WebAssembly externref/funcref convention).
  case IIT_EXTERNREF:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 10));
    return;
  case IIT_FUNCREF:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 20));
    return;
  case IIT_PTR:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
    return;
  case IIT_PTR_AS: // pointer with address space.
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::Pointer, Infos[NextElt++]));
    return;
  case IIT_ANY: {
    // Overloaded slot: payload byte identifies the overload argument.
    unsigned OverloadInfo = Infos[NextElt++];
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::Overloaded, OverloadInfo));
    return;
  }
  case IIT_EXTEND_ARG: {
    unsigned OverloadIndex = Infos[NextElt++];
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::Extend, OverloadIndex));
    return;
  }
  case IIT_TRUNC_ARG: {
    unsigned OverloadIndex = Infos[NextElt++];
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::Trunc, OverloadIndex));
    return;
  }
  case IIT_ONE_NTH_ELTS_VEC_ARG: {
    // Two payload bytes: the referenced overload index and the divisor N.
    unsigned short OverloadIndex = Infos[NextElt++];
    unsigned short N = Infos[NextElt++];
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::OneNthEltsVec,
                                             /*Hi=*/N, /*Lo=*/OverloadIndex));
    return;
  }
  case IIT_SAME_VEC_WIDTH_ARG: {
    unsigned OverloadIndex = Infos[NextElt++];
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::SameVecWidth, OverloadIndex));
    return;
  }
  case IIT_VEC_OF_ANYPTRS_TO_ELT: {
    // Two payload bytes: the overloaded pointer-vector slot and the slot of
    // the vector whose element type is referenced.
    unsigned short OverloadIndex = Infos[NextElt++];
    unsigned short RefOverloadIndex = Infos[NextElt++];
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecOfAnyPtrsToElt,
                                             /*Hi=*/RefOverloadIndex,
                                             /*Lo=*/OverloadIndex));
    return;
  }
  case IIT_STRUCT: {
    // The payload stores the element count minus 2; restore the bias, then
    // recurse once per element type.
    unsigned StructElts = Infos[NextElt++] + 2;

    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::Struct, StructElts));

    for (unsigned i = 0; i != StructElts; ++i)
      DecodeIITType(NextElt, Infos, OutputTable);
    return;
  }
  case IIT_SUBDIVIDE2_ARG: {
    unsigned OverloadIndex = Infos[NextElt++];
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::Subdivide2, OverloadIndex));
    return;
  }
  case IIT_SUBDIVIDE4_ARG: {
    unsigned OverloadIndex = Infos[NextElt++];
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::Subdivide4, OverloadIndex));
    return;
  }
  case IIT_VEC_ELEMENT: {
    unsigned OverloadIndex = Infos[NextElt++];
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::VecElement, OverloadIndex));
    return;
  }
  case IIT_VEC_OF_BITCASTS_TO_INT: {
    unsigned OverloadIndex = Infos[NextElt++];
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::VecOfBitcastsToInt, OverloadIndex));
    return;
  }
  case IIT_SCALABLE_VEC:
    // Only valid as a prefix consumed by IsScalableVector above; seeing it
    // here means the stream is malformed, so fall through to unreachable.
    break;
  }
  llvm_unreachable("unhandled");
}
|
|
|
|
#define GET_INTRINSIC_GENERATOR_GLOBAL
|
|
#include "llvm/IR/IntrinsicImpl.inc"
|
|
|
|
/// Expand the fixed encoding for intrinsic \p id into a sequence of
/// IITDescriptors appended to \p T. Small encodings are packed as nibbles
/// directly in IIT_Table; larger ones live in IIT_LongEncodingTable, selected
/// by the table entry's most significant (sentinel) bit.
void Intrinsic::getIntrinsicInfoTableEntries(
    ID id, SmallVectorImpl<IITDescriptor> &T) {
  // Note that `FixedEncodingTy` is defined in IntrinsicImpl.inc and can be
  // uint16_t or uint32_t based on the value of `Use16BitFixedEncoding` in
  // IntrinsicEmitter.cpp.
  constexpr unsigned FixedEncodingBits = sizeof(FixedEncodingTy) * CHAR_BIT;
  constexpr unsigned MSBPosition = FixedEncodingBits - 1;
  // Mask with all bits 1 except the most significant bit.
  constexpr unsigned Mask = (1U << MSBPosition) - 1;

  // IIT_Table has no entry for not_intrinsic (ID 0), hence the -1.
  FixedEncodingTy TableVal = IIT_Table[id - 1];

  // Array to hold the inlined fixed encoding values expanded from nibbles to
  // bytes. Its size can be at most FixedEncodingBits / 4 i.e., the number
  // of nibbles that can fit in `FixedEncodingTy` + 1 (the IIT_Done terminator
  // that is not explicitly encoded). Note that if there are trailing 0 bytes
  // in the encoding (for example, payload following one of the IIT tokens),
  // the inlined encoding does not encode the actual size of the encoding, so
  // we always assume its size is this maximum length possible, followed by the
  // IIT_Done terminator token (whose value is 0).
  unsigned char IITValues[FixedEncodingBits / 4 + 1] = {0};

  ArrayRef<unsigned char> IITEntries;
  unsigned NextElt = 0;
  // Check to see if the intrinsic's type was inlined in the fixed encoding
  // table.
  if (TableVal >> MSBPosition) {
    // This is an offset into the IIT_LongEncodingTable.
    IITEntries = IIT_LongEncodingTable;

    // Strip sentinel bit.
    NextElt = TableVal & Mask;
  } else {
    // If the entry was encoded into a single word in the table itself, decode
    // it from an array of nibbles to an array of bytes.
    do {
      IITValues[NextElt++] = TableVal & 0xF;
      TableVal >>= 4;
    } while (TableVal);

    IITEntries = IITValues;
    NextElt = 0;
  }

  // Okay, decode the table into the output vector of IITDescriptors. Bound
  // the loop by the entry count as well: a token's payload byte may consume
  // the trailing IIT_Done terminator, leaving NextElt == IITEntries.size(),
  // in which case indexing IITEntries[NextElt] would read out of bounds.
  DecodeIITType(NextElt, IITEntries, T);
  while (NextElt != IITEntries.size() && IITEntries[NextElt] != IIT_Done)
    DecodeIITType(NextElt, IITEntries, T);
}
|
|
|
|
/// Materialize the next descriptor from \p Infos as a concrete IR Type in
/// \p Context, substituting overloaded/dependent slots with the matching
/// entry of \p OverloadTys. Consumes descriptors from the front of \p Infos,
/// recursing for compound types (vectors, structs, same-width elements).
static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
                             ArrayRef<Type *> OverloadTys,
                             LLVMContext &Context) {
  using namespace Intrinsic;

  IITDescriptor D = Infos.consume_front();

  switch (D.Kind) {
  case IITDescriptor::Void:
    return Type::getVoidTy(Context);
  case IITDescriptor::VarArg:
    // VarArg is encoded as a trailing void; getType strips it later.
    return Type::getVoidTy(Context);
  case IITDescriptor::MMX:
    // MMX values are represented as <1 x i64>.
    return llvm::FixedVectorType::get(llvm::IntegerType::get(Context, 64), 1);
  case IITDescriptor::AMX:
    return Type::getX86_AMXTy(Context);
  case IITDescriptor::Token:
    return Type::getTokenTy(Context);
  case IITDescriptor::Metadata:
    return Type::getMetadataTy(Context);
  case IITDescriptor::Half:
    return Type::getHalfTy(Context);
  case IITDescriptor::BFloat:
    return Type::getBFloatTy(Context);
  case IITDescriptor::Float:
    return Type::getFloatTy(Context);
  case IITDescriptor::Double:
    return Type::getDoubleTy(Context);
  case IITDescriptor::Quad:
    return Type::getFP128Ty(Context);
  case IITDescriptor::PPCQuad:
    return Type::getPPC_FP128Ty(Context);
  case IITDescriptor::AArch64Svcount:
    return TargetExtType::get(Context, "aarch64.svcount");

  case IITDescriptor::Integer:
    return IntegerType::get(Context, D.IntegerWidth);
  case IITDescriptor::Vector:
    // Element type follows in the descriptor stream.
    return VectorType::get(DecodeFixedType(Infos, OverloadTys, Context),
                           D.VectorWidth);
  case IITDescriptor::Pointer:
    return PointerType::get(Context, D.PointerAddressSpace);
  case IITDescriptor::Struct: {
    SmallVector<Type *, 8> Elts;
    for (unsigned i = 0, e = D.StructNumElements; i != e; ++i)
      Elts.push_back(DecodeFixedType(Infos, OverloadTys, Context));
    return StructType::get(Context, Elts);
  }
  // For any overload kind or partially dependent type, substitute it with the
  // corresponding concrete type from OverloadTys.
  case IITDescriptor::Overloaded:
  case IITDescriptor::VecOfAnyPtrsToElt:
    return OverloadTys[D.getOverloadIndex()];
  case IITDescriptor::Extend: {
    // Double the element width of the referenced overload type.
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(Ty))
      return VectorType::getExtendedElementVectorType(VTy);

    return IntegerType::get(Context, 2 * cast<IntegerType>(Ty)->getBitWidth());
  }
  case IITDescriptor::Trunc: {
    // Halve the element width of the referenced overload type.
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(Ty))
      return VectorType::getTruncatedElementVectorType(VTy);

    IntegerType *ITy = cast<IntegerType>(Ty);
    assert(ITy->getBitWidth() % 2 == 0);
    return IntegerType::get(Context, ITy->getBitWidth() / 2);
  }
  case IITDescriptor::Subdivide2:
  case IITDescriptor::Subdivide4: {
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    VectorType *VTy = dyn_cast<VectorType>(Ty);
    assert(VTy && "Expected overload type to be a Vector Type");
    int SubDivs = D.Kind == IITDescriptor::Subdivide2 ? 1 : 2;
    return VectorType::getSubdividedVectorType(VTy, SubDivs);
  }
  case IITDescriptor::OneNthEltsVec:
    return VectorType::getOneNthElementsVectorType(
        cast<VectorType>(OverloadTys[D.getOverloadIndex()]),
        D.getVectorDivisor());
  case IITDescriptor::SameVecWidth: {
    // Decode the element type, then give it the same element count as the
    // referenced overload type (or return it as a scalar).
    Type *EltTy = DecodeFixedType(Infos, OverloadTys, Context);
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    if (auto *VTy = dyn_cast<VectorType>(Ty))
      return VectorType::get(EltTy, VTy->getElementCount());
    return EltTy;
  }
  case IITDescriptor::VecElement: {
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(Ty))
      return VTy->getElementType();
    llvm_unreachable("Expected overload type to be a Vector Type");
  }
  case IITDescriptor::VecOfBitcastsToInt: {
    // Same shape as the overload vector, with integer elements.
    Type *Ty = OverloadTys[D.getOverloadIndex()];
    VectorType *VTy = dyn_cast<VectorType>(Ty);
    assert(VTy && "Expected overload type to be a Vector Type");
    return VectorType::getInteger(VTy);
  }
  }
  llvm_unreachable("unhandled");
}
|
|
|
|
/// Construct the FunctionType for intrinsic \p id, with overloaded slots
/// resolved to the concrete types in \p OverloadTys.
FunctionType *Intrinsic::getType(LLVMContext &Context, ID id,
                                 ArrayRef<Type *> OverloadTys) {
  // Expand the generated fixed encoding into descriptors, then decode them
  // front-to-back: the first decoded type is the result, the rest arguments.
  SmallVector<IITDescriptor, 8> Descriptors;
  getIntrinsicInfoTableEntries(id, Descriptors);
  ArrayRef<IITDescriptor> Remaining = Descriptors;

  Type *RetTy = DecodeFixedType(Remaining, OverloadTys, Context);

  SmallVector<Type *, 8> ParamTys;
  while (!Remaining.empty())
    ParamTys.push_back(DecodeFixedType(Remaining, OverloadTys, Context));

  // VarArg intrinsics encode a void type as the last argument type. Detect
  // that and then drop the void argument.
  bool IsVarArg = !ParamTys.empty() && ParamTys.back()->isVoidTy();
  if (IsVarArg)
    ParamTys.pop_back();

  return FunctionType::get(RetTy, ParamTys, IsVarArg);
}
|
|
|
|
/// Whether the intrinsic has overloaded (polymorphic) return/operand types.
/// The body is a TableGen-generated lookup from IntrinsicImpl.inc.
bool Intrinsic::isOverloaded(ID id) {
#define GET_INTRINSIC_OVERLOAD_TABLE
#include "llvm/IR/IntrinsicImpl.inc"
}
|
|
|
|
/// Whether the intrinsic can be scalarized trivially. The body is a
/// TableGen-generated lookup from IntrinsicImpl.inc.
bool Intrinsic::isTriviallyScalarizable(ID id) {
#define GET_INTRINSIC_SCALARIZABLE_TABLE
#include "llvm/IR/IntrinsicImpl.inc"
}
|
|
|
|
/// Whether the intrinsic's arguments get pretty-printed forms. The body is a
/// TableGen-generated lookup from IntrinsicImpl.inc.
bool Intrinsic::hasPrettyPrintedArgs(ID id) {
#define GET_INTRINSIC_PRETTY_PRINT_TABLE
#include "llvm/IR/IntrinsicImpl.inc"
}
|
|
|
|
/// Table of per-target intrinsic name tables.
|
|
#define GET_INTRINSIC_TARGET_DATA
|
|
#include "llvm/IR/IntrinsicImpl.inc"
|
|
|
|
bool Intrinsic::isTargetIntrinsic(Intrinsic::ID IID) {
  // TargetInfos[0] is the generic (target-independent) entry; IDs beyond its
  // Count belong to one of the per-target intrinsic sets.
  return IID > TargetInfos[0].Count;
}
|
|
|
|
/// Looks up Name in NameTable via binary search. NameTable must be sorted
|
|
/// and all entries must start with "llvm.". If NameTable contains an exact
|
|
/// match for Name or a prefix of Name followed by a dot, its index in
|
|
/// NameTable is returned. Otherwise, -1 is returned.
|
|
static int lookupLLVMIntrinsicByName(ArrayRef<unsigned> NameOffsetTable,
                                     StringRef Name, StringRef Target = "") {
  assert(Name.starts_with("llvm.") && "Unexpected intrinsic prefix");
  assert(Name.drop_front(5).starts_with(Target) && "Unexpected target");

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  if (!Target.empty())
    CmpEnd += 1 + Target.size(); // skip the .target component.

  const unsigned *Low = NameOffsetTable.begin();
  const unsigned *High = NameOffsetTable.end();
  // Tracks the last non-empty candidate range, so a search that narrows to
  // empty can still fall back to a prefix match.
  const unsigned *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    // Advance to the next dotted component of Name: [CmpStart, CmpEnd).
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](auto LHS, auto RHS) {
      // `equal_range` requires the comparison to work with either side being an
      // offset or the value. Detect which kind each side is to set up the
      // compared strings.
      const char *LHSStr;
      if constexpr (std::is_integral_v<decltype(LHS)>)
        LHSStr = IntrinsicNameTable.getCString(LHS);
      else
        LHSStr = LHS;

      const char *RHSStr;
      if constexpr (std::is_integral_v<decltype(RHS)>)
        RHSStr = IntrinsicNameTable.getCString(RHS);
      else
        RHSStr = RHS;

      // Only compare the current component; earlier components are known
      // equal within [Low, High).
      return strncmp(LHSStr + CmpStart, RHSStr + CmpStart, CmpEnd - CmpStart) <
             0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameOffsetTable.end())
    return -1;
  // Accept either an exact match or a prefix match at a '.' boundary (the
  // latter covers mangled suffixes of overloaded intrinsics).
  StringRef NameFound = IntrinsicNameTable[*LastLow];
  if (Name == NameFound ||
      (Name.starts_with(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameOffsetTable.begin();
  return -1;
}
|
|
|
|
/// Find the segment of \c IntrinsicNameOffsetTable for intrinsics with the same
|
|
/// target as \c Name, or the generic table if \c Name is not target specific.
|
|
///
|
|
/// Returns the relevant slice of \c IntrinsicNameOffsetTable and the target
|
|
/// name.
|
|
/// Find the segment of \c IntrinsicNameOffsetTable for intrinsics with the
/// same target as \c Name, or the generic table if \c Name is not target
/// specific. Returns the relevant slice and the target name.
static std::pair<ArrayRef<unsigned>, StringRef>
findTargetSubtable(StringRef Name) {
  assert(Name.starts_with("llvm."));

  // The candidate target is the first dotted component after "llvm.".
  StringRef Target = Name.drop_front(5).split('.').first;

  // TargetInfos is ordered so partition_point lands on the only possible
  // location of the target's entry.
  ArrayRef<IntrinsicTargetInfo> Targets(TargetInfos);
  const auto *It = partition_point(Targets, [=](const IntrinsicTargetInfo &TI) {
    return TI.Name < Target;
  });
  // We've either found the target or just fall back to the generic set, which
  // is always first.
  bool FoundTarget = It != Targets.end() && It->Name == Target;
  const IntrinsicTargetInfo &TI = FoundTarget ? *It : Targets[0];

  // Sub-table offsets are relative to index 1 of the offset table.
  return {ArrayRef(&IntrinsicNameOffsetTable[1] + TI.Offset, TI.Count),
          TI.Name};
}
|
|
|
|
/// This does the actual lookup of an intrinsic ID which matches the given
|
|
/// function name.
|
|
/// This does the actual lookup of an intrinsic ID which matches the given
/// function name.
Intrinsic::ID Intrinsic::lookupIntrinsicID(StringRef Name) {
  auto [NameOffsetTable, Target] = findTargetSubtable(Name);
  int Idx = lookupLLVMIntrinsicByName(NameOffsetTable, Name, Target);
  if (Idx == -1)
    return Intrinsic::not_intrinsic;

  // The sub-table index must be translated back into a global intrinsic ID:
  // IDs correspond to positions in the full IntrinsicNameTable.
  int Adjust = NameOffsetTable.data() - IntrinsicNameOffsetTable;
  auto ID = static_cast<Intrinsic::ID>(Idx + Adjust);

  // An exact name match is always a hit; a longer name (prefix match) is only
  // valid for overloaded intrinsics, whose mangled suffix follows the base
  // name.
  const auto MatchSize = IntrinsicNameTable[NameOffsetTable[Idx]].size();
  assert(Name.size() >= MatchSize && "Expected either exact or prefix match");
  if (Name.size() == MatchSize)
    return ID;
  return Intrinsic::isOverloaded(ID) ? ID : Intrinsic::not_intrinsic;
}
|
|
|
|
/// This defines the "Intrinsic::getAttributes(ID id)" method.
|
|
#define GET_INTRINSIC_ATTRIBUTES
|
|
#include "llvm/IR/IntrinsicImpl.inc"
|
|
|
|
/// Shared worker for the getOrInsertDeclaration overloads: compute the
/// (possibly mangled) declaration name for (\p id, \p OverloadTys) and return
/// a declaration with type \p FT, displacing any stale declaration.
static Function *
getOrInsertIntrinsicDeclarationImpl(Module *M, Intrinsic::ID id,
                                    ArrayRef<Type *> OverloadTys,
                                    FunctionType *FT) {
  std::string Name;
  if (OverloadTys.empty())
    Name = Intrinsic::getName(id).str();
  else
    Name = Intrinsic::getName(id, OverloadTys, M, FT);

  auto *Decl = cast<Function>(M->getOrInsertFunction(Name, FT).getCallee());
  if (Decl->getFunctionType() == FT)
    return Decl;

  // It's possible that a declaration for this intrinsic already exists with an
  // incorrect signature, if the signature has changed, but this particular
  // declaration has not been auto-upgraded yet. In that case, rename the
  // invalid declaration and insert a new one with the correct signature. The
  // invalid declaration will get upgraded later.
  Decl->setName(Decl->getName() + ".invalid");
  return cast<Function>(M->getOrInsertFunction(Name, FT).getCallee());
}
|
|
|
|
/// Get or insert the declaration for intrinsic \p id with the given overload
/// types, deriving the FunctionType from the intrinsic's signature table.
Function *Intrinsic::getOrInsertDeclaration(Module *M, ID id,
                                            ArrayRef<Type *> OverloadTys) {
  // There can never be multiple globals with the same name of different types,
  // because intrinsics must be a specific type.
  return getOrInsertIntrinsicDeclarationImpl(
      M, id, OverloadTys, getType(M->getContext(), id, OverloadTys));
}
|
|
|
|
/// Get or insert the declaration for intrinsic \p id whose concrete signature
/// is (\p RetTy)(\p ArgTys); the overloaded type slots are inferred by
/// matching the concrete signature against the intrinsic's descriptors.
Function *Intrinsic::getOrInsertDeclaration(Module *M, ID id, Type *RetTy,
                                            ArrayRef<Type *> ArgTys) {
  // Non-overloaded intrinsics have a fixed signature; RetTy/ArgTys are
  // irrelevant for them.
  if (!Intrinsic::isOverloaded(id))
    return getOrInsertDeclaration(M, id);

  // Get the intrinsic signature metadata.
  SmallVector<Intrinsic::IITDescriptor, 8> Descriptors;
  getIntrinsicInfoTableEntries(id, Descriptors);
  ArrayRef<Intrinsic::IITDescriptor> Remaining = Descriptors;

  FunctionType *FnTy = FunctionType::get(RetTy, ArgTys, /*isVarArg=*/false);

  // Automatically determine the overloaded types by matching the concrete
  // signature against the descriptor stream (which is consumed as we go).
  SmallVector<Type *, 4> OverloadTys;
  [[maybe_unused]] Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FnTy, Remaining, OverloadTys);
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match &&
         "intrinsic signature mismatch");

  // If intrinsic requires vararg, recreate the FunctionType accordingly.
  if (!matchIntrinsicVarArg(/*isVarArg=*/true, Remaining))
    FnTy = FunctionType::get(RetTy, ArgTys, /*isVarArg=*/true);

  assert(Remaining.empty() && "Unprocessed descriptors remain");

  return getOrInsertIntrinsicDeclarationImpl(M, id, OverloadTys, FnTy);
}
|
|
|
|
Function *Intrinsic::getDeclarationIfExists(const Module *M, ID id) {
  // Non-overloaded intrinsics have a single possible name, so a plain
  // function lookup suffices; returns null if \p M has no such declaration.
  return M->getFunction(getName(id));
}
|
|
|
|
Function *Intrinsic::getDeclarationIfExists(Module *M, ID id,
                                            ArrayRef<Type *> OverloadTys,
                                            FunctionType *FT) {
  // Look up the fully mangled overloaded name; M participates so that the
  // mangling of unnamed types matches any existing declaration.
  return M->getFunction(getName(id, OverloadTys, M, FT));
}
|
|
|
|
// This defines the "Intrinsic::getIntrinsicForClangBuiltin()" method.
|
|
#define GET_LLVM_INTRINSIC_FOR_CLANG_BUILTIN
|
|
#include "llvm/IR/IntrinsicImpl.inc"
|
|
|
|
// This defines the "Intrinsic::getIntrinsicForMSBuiltin()" method.
|
|
#define GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
|
|
#include "llvm/IR/IntrinsicImpl.inc"
|
|
|
|
/// True if \p QID is one of the constrained floating-point intrinsics listed
/// in ConstrainedOps.def (expanded below into one case label per entry).
bool Intrinsic::isConstrainedFPIntrinsic(ID QID) {
  switch (QID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
#undef INSTRUCTION
    return true;
  default:
    return false;
  }
}
|
|
|
|
/// True if constrained intrinsic \p QID carries an explicit rounding-mode
/// operand, per the ROUND_MODE column of ConstrainedOps.def.
bool Intrinsic::hasConstrainedFPRoundingModeOperand(Intrinsic::ID QID) {
  switch (QID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return ROUND_MODE == 1;
#include "llvm/IR/ConstrainedOps.def"
#undef INSTRUCTION
  default:
    return false;
  }
}
|
|
|
|
/// A (type, remaining-descriptors) pair recording a signature check that had
/// to be postponed because it forward-references an overload slot not yet
/// populated. The check is re-run once all slots are filled in.
using DeferredIntrinsicMatchPair =
    std::pair<Type *, ArrayRef<Intrinsic::IITDescriptor>>;
|
|
|
|
/// Match \p Ty against the next descriptor(s) in \p Infos, consuming the
/// descriptors that were used.
///
/// NOTE the inverted sense: returns true on a MISMATCH and false when the
/// type is acceptable (this convention holds for every case below).
///
/// Overload types discovered while matching are appended to \p OverloadTys.
/// A descriptor referring to an overload slot that has not been populated yet
/// (a forward reference) cannot be verified immediately; the (type,
/// remaining-descriptors) pair is then recorded in \p DeferredChecks and this
/// function is re-invoked on it later with \p IsDeferredCheck set to true.
static bool
matchIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
                   SmallVectorImpl<Type *> &OverloadTys,
                   SmallVectorImpl<DeferredIntrinsicMatchPair> &DeferredChecks,
                   bool IsDeferredCheck) {
  using namespace Intrinsic;

  // If we ran out of descriptors, there are too many arguments.
  if (Infos.empty())
    return true;

  // Do this before slicing off the 'front' part
  auto InfosRef = Infos;
  // Record a forward-reference check (with the un-consumed descriptor list)
  // to be re-run later; returns false ("no mismatch yet") so matching of the
  // remaining arguments can continue.
  auto DeferCheck = [&DeferredChecks, &InfosRef](Type *T) {
    DeferredChecks.emplace_back(T, InfosRef);
    return false;
  };

  IITDescriptor D = Infos.consume_front();

  switch (D.Kind) {
  case IITDescriptor::Void:
    return !Ty->isVoidTy();
  case IITDescriptor::VarArg:
    // VarArg is handled by matchIntrinsicVarArg, not here.
    return true;
  case IITDescriptor::MMX: {
    // MMX values are expected to be <1 x i64> vectors.
    FixedVectorType *VT = dyn_cast<FixedVectorType>(Ty);
    return !VT || VT->getNumElements() != 1 ||
           !VT->getElementType()->isIntegerTy(64);
  }
  case IITDescriptor::AMX:
    return !Ty->isX86_AMXTy();
  case IITDescriptor::Token:
    return !Ty->isTokenTy();
  case IITDescriptor::Metadata:
    return !Ty->isMetadataTy();
  case IITDescriptor::Half:
    return !Ty->isHalfTy();
  case IITDescriptor::BFloat:
    return !Ty->isBFloatTy();
  case IITDescriptor::Float:
    return !Ty->isFloatTy();
  case IITDescriptor::Double:
    return !Ty->isDoubleTy();
  case IITDescriptor::Quad:
    return !Ty->isFP128Ty();
  case IITDescriptor::PPCQuad:
    return !Ty->isPPC_FP128Ty();
  case IITDescriptor::Integer:
    return !Ty->isIntegerTy(D.IntegerWidth);
  case IITDescriptor::AArch64Svcount:
    // Must be the "aarch64.svcount" target extension type.
    return !isa<TargetExtType>(Ty) ||
           cast<TargetExtType>(Ty)->getName() != "aarch64.svcount";
  case IITDescriptor::Vector: {
    // Check the element count here, then recurse for the element type (which
    // consumes the next descriptor).
    VectorType *VT = dyn_cast<VectorType>(Ty);
    return !VT || VT->getElementCount() != D.VectorWidth ||
           matchIntrinsicType(VT->getElementType(), Infos, OverloadTys,
                              DeferredChecks, IsDeferredCheck);
  }
  case IITDescriptor::Pointer: {
    PointerType *PT = dyn_cast<PointerType>(Ty);
    return !PT || PT->getAddressSpace() != D.PointerAddressSpace;
  }

  case IITDescriptor::Struct: {
    // Only non-packed literal structs with the expected member count match;
    // each member then consumes its own descriptor(s) recursively.
    StructType *ST = dyn_cast<StructType>(Ty);
    if (!ST || !ST->isLiteral() || ST->isPacked() ||
        ST->getNumElements() != D.StructNumElements)
      return true;

    for (unsigned i = 0, e = D.StructNumElements; i != e; ++i)
      if (matchIntrinsicType(ST->getElementType(i), Infos, OverloadTys,
                             DeferredChecks, IsDeferredCheck))
        return true;
    return false;
  }

  case IITDescriptor::Overloaded:
    // If this is the second occurrence of an argument,
    // verify that the later instance matches the previous instance.
    if (D.getOverloadIndex() < OverloadTys.size())
      return Ty != OverloadTys[D.getOverloadIndex()];

    // Forward reference to an unfilled slot (or an explicit match-type kind):
    // defer; a deferred check reaching this point again is a failure.
    if (D.getOverloadIndex() > OverloadTys.size() ||
        D.getOverloadKind() == IITDescriptor::AK_MatchType)
      return IsDeferredCheck || DeferCheck(Ty);

    assert(D.getOverloadIndex() == OverloadTys.size() && !IsDeferredCheck &&
           "Table consistency error");
    // First occurrence: record the concrete type for this overload slot.
    OverloadTys.push_back(Ty);

    // Then validate the type against the constraints of the argument kind.
    switch (D.getOverloadKind()) {
    case IITDescriptor::AK_Any:
      return false; // Success
    case IITDescriptor::AK_AnyInteger:
      return !Ty->isIntOrIntVectorTy();
    case IITDescriptor::AK_AnyFloat:
      return !Ty->isFPOrFPVectorTy();
    case IITDescriptor::AK_AnyVector:
      return !isa<VectorType>(Ty);
    case IITDescriptor::AK_AnyPointer:
      return !isa<PointerType>(Ty);
    default:
      break;
    }
    llvm_unreachable("all argument kinds not covered");

  case IITDescriptor::Extend: {
    // If this is a forward reference, defer the check for later.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    // Ty must be the referenced type with doubled element/integer width.
    Type *NewTy = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
      NewTy = VectorType::getExtendedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
      NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth());
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::Trunc: {
    // If this is a forward reference, defer the check for later.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    // Ty must be the referenced type with halved element/integer width.
    Type *NewTy = OverloadTys[D.getOverloadIndex()];
    if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
      NewTy = VectorType::getTruncatedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
      NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2);
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::OneNthEltsVec: {
    // If this is a forward reference, defer the check for later.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    // Ty must be the referenced vector type with 1/N of its element count.
    auto *VTy = dyn_cast<VectorType>(OverloadTys[D.getOverloadIndex()]);
    if (!VTy)
      return true;
    if (!VTy->getElementCount().isKnownMultipleOf(D.getVectorDivisor()))
      return true;
    return VectorType::getOneNthElementsVectorType(VTy, D.getVectorDivisor()) !=
           Ty;
  }
  case IITDescriptor::SameVecWidth: {
    if (D.getOverloadIndex() >= OverloadTys.size()) {
      // Defer check and subsequent check for the vector element type.
      Infos.consume_front();
      return IsDeferredCheck || DeferCheck(Ty);
    }
    auto *ReferenceType =
        dyn_cast<VectorType>(OverloadTys[D.getOverloadIndex()]);
    auto *ThisArgType = dyn_cast<VectorType>(Ty);
    // Both must be vectors of the same number of elements or neither.
    if ((ReferenceType != nullptr) != (ThisArgType != nullptr))
      return true;
    Type *EltTy = Ty;
    if (ThisArgType) {
      if (ReferenceType->getElementCount() != ThisArgType->getElementCount())
        return true;
      EltTy = ThisArgType->getElementType();
    }
    // The element (or scalar) type consumes the next descriptor.
    return matchIntrinsicType(EltTy, Infos, OverloadTys, DeferredChecks,
                              IsDeferredCheck);
  }
  case IITDescriptor::VecOfAnyPtrsToElt: {
    unsigned RefOverloadIndex = D.getRefOverloadIndex();
    if (RefOverloadIndex >= OverloadTys.size()) {
      if (IsDeferredCheck)
        return true;
      // If forward referencing, already add the pointer-vector type and
      // defer the checks for later.
      OverloadTys.push_back(Ty);
      return DeferCheck(Ty);
    }

    if (!IsDeferredCheck) {
      assert(D.getOverloadIndex() == OverloadTys.size() &&
             "Table consistency error");
      OverloadTys.push_back(Ty);
    }

    // Verify the overloaded type "matches" the Ref type.
    // i.e. Ty is a vector with the same width as Ref.
    // Composed of pointers to the same element type as Ref.
    auto *ReferenceType = dyn_cast<VectorType>(OverloadTys[RefOverloadIndex]);
    auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
    if (!ThisArgVecTy || !ReferenceType ||
        (ReferenceType->getElementCount() != ThisArgVecTy->getElementCount()))
      return true;
    return !ThisArgVecTy->getElementType()->isPointerTy();
  }
  case IITDescriptor::VecElement: {
    // Ty must be the element type of the referenced vector type.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck ? true : DeferCheck(Ty);
    auto *ReferenceType =
        dyn_cast<VectorType>(OverloadTys[D.getOverloadIndex()]);
    return !ReferenceType || Ty != ReferenceType->getElementType();
  }
  case IITDescriptor::Subdivide2:
  case IITDescriptor::Subdivide4: {
    // If this is a forward reference, defer the check for later.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    // Ty must be the referenced vector subdivided once (Subdivide2) or
    // twice (Subdivide4).
    Type *NewTy = OverloadTys[D.getOverloadIndex()];
    if (auto *VTy = dyn_cast<VectorType>(NewTy)) {
      int SubDivs = D.Kind == IITDescriptor::Subdivide2 ? 1 : 2;
      NewTy = VectorType::getSubdividedVectorType(VTy, SubDivs);
      return Ty != NewTy;
    }
    return true;
  }
  case IITDescriptor::VecOfBitcastsToInt: {
    // Ty must be the integer-element counterpart of the referenced vector.
    if (D.getOverloadIndex() >= OverloadTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    auto *ReferenceType =
        dyn_cast<VectorType>(OverloadTys[D.getOverloadIndex()]);
    auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
    if (!ThisArgVecTy || !ReferenceType)
      return true;
    return ThisArgVecTy != VectorType::getInteger(ReferenceType);
  }
  }
  llvm_unreachable("unhandled");
}
|
|
|
|
/// Verify that a function type matches the descriptor table of an intrinsic,
/// collecting the overload types along the way. Matching runs in three
/// stages: the return type, then each parameter, then any checks that were
/// deferred because they forward-referenced an overload slot.
Intrinsic::MatchIntrinsicTypesResult
Intrinsic::matchIntrinsicSignature(FunctionType *FTy,
                                   ArrayRef<Intrinsic::IITDescriptor> &Infos,
                                   SmallVectorImpl<Type *> &OverloadTys) {
  SmallVector<DeferredIntrinsicMatchPair, 2> Deferred;

  // Stage 1: the return type. A mismatch here is reported as a return-type
  // failure.
  bool RetMismatch = matchIntrinsicType(FTy->getReturnType(), Infos,
                                        OverloadTys, Deferred, false);
  if (RetMismatch)
    return MatchIntrinsicTypes_NoMatchRet;

  // Remember how many deferred checks came from the return type so a later
  // deferred failure can be attributed to return vs. argument.
  const unsigned RetDeferredCount = Deferred.size();

  // Stage 2: each parameter in order.
  for (Type *ParamTy : FTy->params())
    if (matchIntrinsicType(ParamTy, Infos, OverloadTys, Deferred, false))
      return MatchIntrinsicTypes_NoMatchArg;

  // Stage 3: re-run the deferred checks now that all overload slots are
  // populated. Index with a size cached up front, as in the original flow.
  for (unsigned Idx = 0, End = Deferred.size(); Idx != End; ++Idx) {
    DeferredIntrinsicMatchPair &Pending = Deferred[Idx];
    if (!matchIntrinsicType(Pending.first, Pending.second, OverloadTys,
                            Deferred, true))
      continue;
    return Idx < RetDeferredCount ? MatchIntrinsicTypes_NoMatchRet
                                  : MatchIntrinsicTypes_NoMatchArg;
  }

  return MatchIntrinsicTypes_Match;
}
|
|
|
|
bool Intrinsic::matchIntrinsicVarArg(
|
|
bool isVarArg, ArrayRef<Intrinsic::IITDescriptor> &Infos) {
|
|
// If there are no descriptors left, then it can't be a vararg.
|
|
if (Infos.empty())
|
|
return isVarArg;
|
|
|
|
// There should be only one descriptor remaining at this point.
|
|
if (Infos.size() != 1)
|
|
return true;
|
|
|
|
// Check and verify the descriptor.
|
|
IITDescriptor D = Infos.consume_front();
|
|
if (D.Kind == IITDescriptor::VarArg)
|
|
return !isVarArg;
|
|
|
|
return true;
|
|
}
|
|
|
|
/// Check whether \p FT is a valid signature for intrinsic \p ID and, if so,
/// fill \p OverloadTys with the deduced overload types. Returns false when
/// \p ID is not_intrinsic or the signature does not match.
bool Intrinsic::getIntrinsicSignature(Intrinsic::ID ID, FunctionType *FT,
                                      SmallVectorImpl<Type *> &OverloadTys) {
  // ID 0 (not_intrinsic) never has a signature.
  if (!ID)
    return false;

  // Build the descriptor table for this intrinsic; matching consumes it
  // through the Remaining view.
  SmallVector<Intrinsic::IITDescriptor, 8> Descriptors;
  getIntrinsicInfoTableEntries(ID, Descriptors);
  ArrayRef<Intrinsic::IITDescriptor> Remaining = Descriptors;

  // Types must match fully, and vararg-ness is only checked afterwards on
  // the leftover descriptors (short-circuit preserves that order).
  const bool TypesMatch =
      matchIntrinsicSignature(FT, Remaining, OverloadTys) ==
      MatchIntrinsicTypes_Match;
  return TypesMatch && !matchIntrinsicVarArg(FT->isVarArg(), Remaining);
}
|
|
|
|
/// Convenience overload: validate \p F's own function type against its
/// intrinsic ID, filling \p OverloadTys on success.
bool Intrinsic::getIntrinsicSignature(Function *F,
                                      SmallVectorImpl<Type *> &OverloadTys) {
  Intrinsic::ID ID = F->getIntrinsicID();
  FunctionType *FTy = F->getFunctionType();
  return getIntrinsicSignature(ID, FTy, OverloadTys);
}
|
|
|
|
/// If intrinsic declaration \p F does not carry the properly mangled name for
/// its overload types, return a declaration that does (reusing or creating
/// one in the same module). Returns std::nullopt when \p F's signature is
/// invalid for its intrinsic ID or its name is already correct.
std::optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) {
  SmallVector<Type *, 4> OverloadTys;
  if (!getIntrinsicSignature(F, OverloadTys))
    return std::nullopt;

  Module *M = F->getParent();
  Intrinsic::ID ID = F->getIntrinsicID();
  std::string WantedName =
      Intrinsic::getName(ID, OverloadTys, M, F->getFunctionType());
  // Nothing to do when the mangling already matches.
  if (F->getName() == WantedName)
    return std::nullopt;

  Function *NewDecl = nullptr;
  if (auto *ExistingGV = M->getNamedValue(WantedName)) {
    auto *ExistingF = dyn_cast<Function>(ExistingGV);
    if (ExistingF && ExistingF->getFunctionType() == F->getFunctionType()) {
      // A declaration with the wanted name and prototype already exists.
      NewDecl = ExistingF;
    } else {
      // The name already exists, but is not a function or has the wrong
      // prototype. Make place for the new one by renaming the old version.
      // Either this old version will be removed later on or the module is
      // invalid and we'll get an error.
      ExistingGV->setName(WantedName + ".renamed");
    }
  }
  if (!NewDecl)
    NewDecl = Intrinsic::getOrInsertDeclaration(M, ID, OverloadTys);

  // Preserve the calling convention of the original declaration.
  NewDecl->setCallingConv(F->getCallingConv());
  assert(NewDecl->getFunctionType() == F->getFunctionType() &&
         "Shouldn't change the signature");
  return NewDecl;
}
|
|
|
|
/// Pairs an interleave intrinsic with its matching deinterleave intrinsic for
/// one interleave factor.
struct InterleaveIntrinsic {
  Intrinsic::ID Interleave, Deinterleave;
};
|
|
|
|
/// Interleave/deinterleave intrinsic pairs, indexed by (factor - 2). Covers
/// factors 2 through 8, matching the assertions in
/// getInterleaveIntrinsicID()/getDeinterleaveIntrinsicID().
static InterleaveIntrinsic InterleaveIntrinsics[] = {
    {Intrinsic::vector_interleave2, Intrinsic::vector_deinterleave2},
    {Intrinsic::vector_interleave3, Intrinsic::vector_deinterleave3},
    {Intrinsic::vector_interleave4, Intrinsic::vector_deinterleave4},
    {Intrinsic::vector_interleave5, Intrinsic::vector_deinterleave5},
    {Intrinsic::vector_interleave6, Intrinsic::vector_deinterleave6},
    {Intrinsic::vector_interleave7, Intrinsic::vector_deinterleave7},
    {Intrinsic::vector_interleave8, Intrinsic::vector_deinterleave8},
};
|
|
|
|
/// Returns the llvm.vector.interleaveN intrinsic for interleave factor
/// \p Factor (2..8).
Intrinsic::ID Intrinsic::getInterleaveIntrinsicID(unsigned Factor) {
  assert(Factor >= 2 && Factor <= 8 && "Unexpected factor");
  // The table starts at factor 2.
  const InterleaveIntrinsic &Entry = InterleaveIntrinsics[Factor - 2];
  return Entry.Interleave;
}
|
|
|
|
/// Returns the llvm.vector.deinterleaveN intrinsic for interleave factor
/// \p Factor (2..8).
Intrinsic::ID Intrinsic::getDeinterleaveIntrinsicID(unsigned Factor) {
  assert(Factor >= 2 && Factor <= 8 && "Unexpected factor");
  // The table starts at factor 2.
  const InterleaveIntrinsic &Entry = InterleaveIntrinsics[Factor - 2];
  return Entry.Deinterleave;
}
|
|
|
|
#define GET_INTRINSIC_PRETTY_PRINT_ARGUMENTS
|
|
#include "llvm/IR/IntrinsicImpl.inc"
|