Previously `m_CanonicalIV()` matched any `VPRegionValue`, which was only correct because the canonical IV is currently the sole `VPRegionValue`. Going forward this is not scalable, as there may be more `VPRegionValue`s.
//===- VPlanPatternMatch.h - Match on VPValues and recipes ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a simple and efficient mechanism for performing general
// tree-based pattern matches on the VPlan values and recipes, based on
// LLVM's IR pattern matchers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H
#define LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H

#include "VPlan.h"
#include "llvm/Support/PatternMatchHelpers.h"

// Pull in the generic matcher combinators (m_Isa, m_CombineOr, bind_back,
// match_bind, match_deferred, ...) shared with the IR pattern matchers.
using namespace llvm::PatternMatchHelpers;

namespace llvm::VPlanPatternMatch {
template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
|
|
return P.match(V);
|
|
}
|
|
|
|
/// A match functor that can be used as a UnaryPredicate in functional
|
|
/// algorithms like all_of.
|
|
template <typename Val, typename Pattern> auto match_fn(const Pattern &P) {
|
|
return bind_back<match<Val, Pattern>>(P);
|
|
}
|
|
|
|
template <typename Pattern> bool match(VPUser *U, const Pattern &P) {
|
|
auto *R = dyn_cast<VPRecipeBase>(U);
|
|
return R && match(R, P);
|
|
}
|
|
|
|
/// Match functor for VPUser.
|
|
template <typename Pattern> auto match_fn(const Pattern &P) {
|
|
return bind_back<match<Pattern>>(P);
|
|
}
|
|
|
|
template <typename Pattern> bool match(VPSingleDefRecipe *R, const Pattern &P) {
|
|
return P.match(static_cast<const VPRecipeBase *>(R));
|
|
}
|
|
|
|
/// Match an arbitrary VPValue and ignore it.
|
|
inline auto m_VPValue() { return m_Isa<VPValue>(); }
|
|
|
|
/// Match a specified VPValue.
|
|
struct specificval_ty {
|
|
const VPValue *Val;
|
|
|
|
specificval_ty(const VPValue *V) : Val(V) {}
|
|
|
|
bool match(const VPValue *VPV) const { return VPV == Val; }
|
|
};
|
|
|
|
inline specificval_ty m_Specific(const VPValue *VPV) { return VPV; }
|
|
|
|
/// Like m_Specific(), but works if the specific value to match is determined
|
|
/// as part of the same match() expression. For example:
|
|
/// m_Mul(m_VPValue(X), m_Specific(X)) is incorrect, because m_Specific() will
|
|
/// bind X before the pattern match starts.
|
|
/// m_Mul(m_VPValue(X), m_Deferred(X)) is correct, and will check against
|
|
/// whichever value m_VPValue(X) populated.
|
|
inline match_deferred<VPValue> m_Deferred(VPValue *const &V) { return V; }
|
|
|
|
/// Match an integer constant if Pred::isValue returns true for the APInt. \p
|
|
/// BitWidth optionally specifies the bitwidth the matched constant must have.
|
|
/// If it is 0, the matched constant can have any bitwidth.
|
|
template <typename Pred, unsigned BitWidth = 0> struct int_pred_ty {
|
|
Pred P;
|
|
|
|
int_pred_ty(Pred P) : P(std::move(P)) {}
|
|
int_pred_ty() : P() {}
|
|
|
|
bool match(const VPValue *VPV) const {
|
|
auto *VPI = dyn_cast<VPInstruction>(VPV);
|
|
if (VPI && VPI->getOpcode() == VPInstruction::Broadcast)
|
|
VPV = VPI->getOperand(0);
|
|
auto *CI = dyn_cast<VPConstantInt>(VPV);
|
|
if (!CI)
|
|
return false;
|
|
|
|
if (BitWidth != 0 && CI->getBitWidth() != BitWidth)
|
|
return false;
|
|
return P.isValue(CI->getAPInt());
|
|
}
|
|
};
|
|
|
|
/// Match a specified signed or unsigned integer value.
|
|
struct is_specific_int {
|
|
APInt Val;
|
|
bool IsSigned;
|
|
|
|
is_specific_int(APInt Val, bool IsSigned = false)
|
|
: Val(std::move(Val)), IsSigned(IsSigned) {}
|
|
|
|
bool isValue(const APInt &C) const {
|
|
return APInt::isSameValue(Val, C, IsSigned);
|
|
}
|
|
};
|
|
|
|
template <unsigned Bitwidth = 0>
|
|
using specific_intval = int_pred_ty<is_specific_int, Bitwidth>;
|
|
|
|
inline specific_intval<0> m_SpecificInt(uint64_t V) {
|
|
return specific_intval<0>(is_specific_int(APInt(64, V)));
|
|
}
|
|
|
|
inline specific_intval<0> m_SpecificSInt(int64_t V) {
|
|
return specific_intval<0>(
|
|
is_specific_int(APInt(64, V, /*isSigned=*/true), /*IsSigned=*/true));
|
|
}
|
|
|
|
inline specific_intval<1> m_False() {
|
|
return specific_intval<1>(is_specific_int(APInt(64, 0)));
|
|
}
|
|
|
|
inline specific_intval<1> m_True() {
|
|
return specific_intval<1>(is_specific_int(APInt(64, 1)));
|
|
}
|
|
|
|
struct is_all_ones {
|
|
bool isValue(const APInt &C) const { return C.isAllOnes(); }
|
|
};
|
|
|
|
/// Match an integer or vector with all bits set.
|
|
/// For vectors, this includes constants with undefined elements.
|
|
inline int_pred_ty<is_all_ones> m_AllOnes() {
|
|
return int_pred_ty<is_all_ones>();
|
|
}
|
|
|
|
struct is_zero_int {
|
|
bool isValue(const APInt &C) const { return C.isZero(); }
|
|
};
|
|
|
|
struct is_one {
|
|
bool isValue(const APInt &C) const { return C.isOne(); }
|
|
};
|
|
|
|
/// Match an integer 0 or a vector with all elements equal to 0.
|
|
/// For vectors, this includes constants with undefined elements.
|
|
inline int_pred_ty<is_zero_int> m_ZeroInt() {
|
|
return int_pred_ty<is_zero_int>();
|
|
}
|
|
|
|
/// Match an integer 1 or a vector with all elements equal to 1.
|
|
/// For vectors, this includes constants with undefined elements.
|
|
inline int_pred_ty<is_one> m_One() { return int_pred_ty<is_one>(); }
|
|
|
|
struct bind_apint {
|
|
const APInt *&Res;
|
|
|
|
bind_apint(const APInt *&Res) : Res(Res) {}
|
|
|
|
bool match(const VPValue *VPV) const {
|
|
auto *CI = dyn_cast<VPConstantInt>(VPV);
|
|
if (!CI)
|
|
return false;
|
|
Res = &CI->getAPInt();
|
|
return true;
|
|
}
|
|
};
|
|
|
|
inline bind_apint m_APInt(const APInt *&C) { return C; }
|
|
|
|
struct bind_const_int {
|
|
uint64_t &Res;
|
|
|
|
bind_const_int(uint64_t &Res) : Res(Res) {}
|
|
|
|
bool match(const VPValue *VPV) const {
|
|
const APInt *APConst;
|
|
if (!bind_apint(APConst).match(VPV))
|
|
return false;
|
|
if (auto C = APConst->tryZExtValue()) {
|
|
Res = *C;
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
};
|
|
|
|
struct match_poison {
|
|
bool match(const VPValue *V) const {
|
|
return isa<VPIRValue>(V) &&
|
|
isa<PoisonValue>(cast<VPIRValue>(V)->getValue());
|
|
}
|
|
};
|
|
|
|
/// Match a VPIRValue that's poison.
|
|
inline match_poison m_Poison() { return match_poison(); }
|
|
|
|
/// Match a plain integer constant no wider than 64-bits, capturing it if we
|
|
/// match.
|
|
inline bind_const_int m_ConstantInt(uint64_t &C) { return C; }
|
|
|
|
/// Match a VPValue, capturing it if we match.
|
|
inline match_bind<VPValue> m_VPValue(VPValue *&V) { return V; }
|
|
|
|
/// Match a VPIRValue.
|
|
inline match_bind<VPIRValue> m_VPIRValue(VPIRValue *&V) { return V; }
|
|
|
|
/// Match a VPSingleDefRecipe, capturing if we match.
|
|
inline match_bind<VPSingleDefRecipe>
|
|
m_VPSingleDefRecipe(VPSingleDefRecipe *&V) {
|
|
return V;
|
|
}
|
|
|
|
/// Match a VPInstruction, capturing if we match.
|
|
inline match_bind<VPInstruction> m_VPInstruction(VPInstruction *&V) {
|
|
return V;
|
|
}
|
|
|
|
template <typename Ops_t, unsigned Opcode, bool Commutative,
|
|
typename... RecipeTys>
|
|
struct Recipe_match {
|
|
Ops_t Ops;
|
|
|
|
template <typename... OpTy> Recipe_match(OpTy... Ops) : Ops(Ops...) {
|
|
static_assert(std::tuple_size<Ops_t>::value == sizeof...(Ops) &&
|
|
"number of operands in constructor doesn't match Ops_t");
|
|
static_assert((!Commutative || std::tuple_size<Ops_t>::value == 2) &&
|
|
"only binary ops can be commutative");
|
|
}
|
|
|
|
bool match(const VPValue *V) const {
|
|
auto *DefR = V->getDefiningRecipe();
|
|
return DefR && match(DefR);
|
|
}
|
|
|
|
bool match(const VPSingleDefRecipe *R) const {
|
|
return match(static_cast<const VPRecipeBase *>(R));
|
|
}
|
|
|
|
bool match(const VPRecipeBase *R) const {
|
|
if (std::tuple_size_v<Ops_t> == 0) {
|
|
auto *VPI = dyn_cast<VPInstruction>(R);
|
|
return VPI && VPI->getOpcode() == Opcode;
|
|
}
|
|
|
|
if ((!matchRecipeAndOpcode<RecipeTys>(R) && ...))
|
|
return false;
|
|
|
|
if (R->getNumOperands() < std::tuple_size<Ops_t>::value) {
|
|
[[maybe_unused]] auto *RepR = dyn_cast<VPReplicateRecipe>(R);
|
|
assert(((isa<VPInstruction>(R) &&
|
|
cast<VPInstruction>(R)->getNumOperandsForOpcode() == -1u) ||
|
|
(RepR && std::tuple_size_v<Ops_t> ==
|
|
RepR->getNumOperands() - RepR->isPredicated())) &&
|
|
"non-variadic recipe with matched opcode does not have the "
|
|
"expected number of operands");
|
|
return false;
|
|
}
|
|
|
|
// If the recipe has more operands than expected, we only support matching
|
|
// masked VPInstructions where the number of operands of the matcher is the
|
|
// same as the number of operands excluding mask.
|
|
if (R->getNumOperands() > std::tuple_size<Ops_t>::value) {
|
|
auto *VPI = dyn_cast<VPInstruction>(R);
|
|
if (!VPI || !VPI->isMasked() ||
|
|
VPI->getNumOperandsWithoutMask() != std::tuple_size<Ops_t>::value)
|
|
return false;
|
|
}
|
|
|
|
auto IdxSeq = std::make_index_sequence<std::tuple_size<Ops_t>::value>();
|
|
if (all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
|
|
return Op.match(R->getOperand(Idx));
|
|
}))
|
|
return true;
|
|
|
|
return Commutative &&
|
|
all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
|
|
return Op.match(R->getOperand(R->getNumOperands() - Idx - 1));
|
|
});
|
|
}
|
|
|
|
private:
|
|
template <typename RecipeTy>
|
|
static bool matchRecipeAndOpcode(const VPRecipeBase *R) {
|
|
auto *DefR = dyn_cast<RecipeTy>(R);
|
|
// Check for recipes that do not have opcodes.
|
|
if constexpr (std::is_same_v<RecipeTy, VPScalarIVStepsRecipe> ||
|
|
std::is_same_v<RecipeTy, VPDerivedIVRecipe> ||
|
|
std::is_same_v<RecipeTy, VPVectorEndPointerRecipe>)
|
|
return DefR;
|
|
else
|
|
return DefR && DefR->getOpcode() == Opcode;
|
|
}
|
|
|
|
/// Helper to check if predicate \p P holds on all tuple elements in Ops using
|
|
/// the provided index sequence.
|
|
template <typename Fn, std::size_t... Is>
|
|
bool all_of_tuple_elements(std::index_sequence<Is...>, Fn P) const {
|
|
return (P(std::get<Is>(Ops), Is) && ...);
|
|
}
|
|
};
|
|
|
|
template <unsigned Opcode, typename... OpTys>
|
|
using AllRecipe_match =
|
|
Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ false,
|
|
VPWidenRecipe, VPReplicateRecipe, VPWidenCastRecipe,
|
|
VPInstruction>;
|
|
|
|
template <unsigned Opcode, typename... OpTys>
|
|
using AllRecipe_commutative_match =
|
|
Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ true,
|
|
VPWidenRecipe, VPReplicateRecipe, VPInstruction>;
|
|
|
|
template <unsigned Opcode, typename... OpTys>
|
|
using VPInstruction_match = Recipe_match<std::tuple<OpTys...>, Opcode,
|
|
/*Commutative*/ false, VPInstruction>;
|
|
|
|
template <unsigned Opcode, typename... OpTys>
|
|
using VPInstruction_commutative_match =
|
|
Recipe_match<std::tuple<OpTys...>, Opcode,
|
|
/*Commutative*/ true, VPInstruction>;
|
|
|
|
template <unsigned Opcode, typename... OpTys>
|
|
inline VPInstruction_match<Opcode, OpTys...>
|
|
m_VPInstruction(const OpTys &...Ops) {
|
|
return VPInstruction_match<Opcode, OpTys...>(Ops...);
|
|
}
|
|
|
|
template <unsigned Opcode, typename Op0_t, typename Op1_t>
|
|
inline VPInstruction_commutative_match<Opcode, Op0_t, Op1_t>
|
|
m_c_VPInstruction(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return VPInstruction_commutative_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
/// BuildVector is matches only its opcode, w/o matching its operands as the
|
|
/// number of operands is not fixed.
|
|
inline VPInstruction_match<VPInstruction::BuildVector> m_BuildVector() {
|
|
return m_VPInstruction<VPInstruction::BuildVector>();
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<Instruction::Freeze, Op0_t>
|
|
m_Freeze(const Op0_t &Op0) {
|
|
return m_VPInstruction<Instruction::Freeze>(Op0);
|
|
}
|
|
|
|
inline VPInstruction_match<VPInstruction::BranchOnCond> m_BranchOnCond() {
|
|
return m_VPInstruction<VPInstruction::BranchOnCond>();
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::BranchOnCond, Op0_t>
|
|
m_BranchOnCond(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::BranchOnCond>(Op0);
|
|
}
|
|
|
|
inline VPInstruction_match<VPInstruction::BranchOnTwoConds>
|
|
m_BranchOnTwoConds() {
|
|
return m_VPInstruction<VPInstruction::BranchOnTwoConds>();
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline VPInstruction_match<VPInstruction::BranchOnTwoConds, Op0_t, Op1_t>
|
|
m_BranchOnTwoConds(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_VPInstruction<VPInstruction::BranchOnTwoConds>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::Broadcast, Op0_t>
|
|
m_Broadcast(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::Broadcast>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::ExplicitVectorLength, Op0_t>
|
|
m_EVL(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::ExplicitVectorLength>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::ExtractLastLane, Op0_t>
|
|
m_ExtractLastLane(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::ExtractLastLane>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline VPInstruction_match<Instruction::ExtractElement, Op0_t, Op1_t>
|
|
m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_VPInstruction<Instruction::ExtractElement>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline VPInstruction_match<VPInstruction::ExtractLane, Op0_t, Op1_t>
|
|
m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_VPInstruction<VPInstruction::ExtractLane>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>
|
|
m_ExtractLastPart(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::ExtractLastPart>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<
|
|
VPInstruction::ExtractLastLane,
|
|
VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>>
|
|
m_ExtractLastLaneOfLastPart(const Op0_t &Op0) {
|
|
return m_ExtractLastLane(m_ExtractLastPart(Op0));
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::ExtractPenultimateElement, Op0_t>
|
|
m_ExtractPenultimateElement(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::ExtractPenultimateElement>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t, typename Op2_t>
|
|
inline VPInstruction_match<VPInstruction::ActiveLaneMask, Op0_t, Op1_t, Op2_t>
|
|
m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
|
|
return m_VPInstruction<VPInstruction::ActiveLaneMask>(Op0, Op1, Op2);
|
|
}
|
|
|
|
inline VPInstruction_match<VPInstruction::BranchOnCount> m_BranchOnCount() {
|
|
return m_VPInstruction<VPInstruction::BranchOnCount>();
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline VPInstruction_match<VPInstruction::BranchOnCount, Op0_t, Op1_t>
|
|
m_BranchOnCount(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_VPInstruction<VPInstruction::BranchOnCount>(Op0, Op1);
|
|
}
|
|
|
|
inline VPInstruction_match<VPInstruction::AnyOf> m_AnyOf() {
|
|
return m_VPInstruction<VPInstruction::AnyOf>();
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::AnyOf, Op0_t>
|
|
m_AnyOf(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::AnyOf>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::FirstActiveLane, Op0_t>
|
|
m_FirstActiveLane(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::FirstActiveLane>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::LastActiveLane, Op0_t>
|
|
m_LastActiveLane(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::LastActiveLane>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t, typename Op2_t>
|
|
inline VPInstruction_match<VPInstruction::ExtractLastActive, Op0_t, Op1_t,
|
|
Op2_t>
|
|
m_ExtractLastActive(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
|
|
return m_VPInstruction<VPInstruction::ExtractLastActive>(Op0, Op1, Op2);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::ComputeReductionResult, Op0_t>
|
|
m_ComputeReductionResult(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::ComputeReductionResult>(Op0);
|
|
}
|
|
|
|
/// Match FindIV result pattern:
|
|
/// select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),
|
|
/// ComputeReductionResult(ReducedIV), Start.
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start) {
|
|
return match(VPI, m_Select(m_SpecificICmp(ICmpInst::ICMP_NE,
|
|
m_ComputeReductionResult(ReducedIV),
|
|
m_VPValue()),
|
|
m_ComputeReductionResult(ReducedIV), Start));
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::Reverse, Op0_t>
|
|
m_Reverse(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::Reverse>(Op0);
|
|
}
|
|
|
|
inline VPInstruction_match<VPInstruction::StepVector> m_StepVector() {
|
|
return m_VPInstruction<VPInstruction::StepVector>();
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline VPInstruction_match<VPInstruction::ExitingIVValue, Op0_t>
|
|
m_ExitingIVValue(const Op0_t &Op0) {
|
|
return m_VPInstruction<VPInstruction::ExitingIVValue>(Op0);
|
|
}
|
|
|
|
template <unsigned Opcode, typename Op0_t>
|
|
inline AllRecipe_match<Opcode, Op0_t> m_Unary(const Op0_t &Op0) {
|
|
return AllRecipe_match<Opcode, Op0_t>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline AllRecipe_match<Instruction::Trunc, Op0_t> m_Trunc(const Op0_t &Op0) {
|
|
return m_Unary<Instruction::Trunc, Op0_t>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline match_combine_or<AllRecipe_match<Instruction::Trunc, Op0_t>, Op0_t>
|
|
m_TruncOrSelf(const Op0_t &Op0) {
|
|
return m_CombineOr(m_Trunc(Op0), Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline AllRecipe_match<Instruction::ZExt, Op0_t> m_ZExt(const Op0_t &Op0) {
|
|
return m_Unary<Instruction::ZExt, Op0_t>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline AllRecipe_match<Instruction::SExt, Op0_t> m_SExt(const Op0_t &Op0) {
|
|
return m_Unary<Instruction::SExt, Op0_t>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline AllRecipe_match<Instruction::FPExt, Op0_t> m_FPExt(const Op0_t &Op0) {
|
|
return m_Unary<Instruction::FPExt, Op0_t>(Op0);
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>,
|
|
AllRecipe_match<Instruction::SExt, Op0_t>>
|
|
m_ZExtOrSExt(const Op0_t &Op0) {
|
|
return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
|
|
}
|
|
|
|
template <typename Op0_t> inline auto m_WidenAnyExtend(const Op0_t &Op0) {
|
|
return m_Isa<VPWidenCastRecipe>(m_CombineOr(m_ZExtOrSExt(Op0), m_FPExt(Op0)));
|
|
}
|
|
|
|
template <typename Op0_t>
|
|
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>, Op0_t>
|
|
m_ZExtOrSelf(const Op0_t &Op0) {
|
|
return m_CombineOr(m_ZExt(Op0), Op0);
|
|
}
|
|
|
|
template <typename Op0_t> inline auto m_ZExtOrTruncOrSelf(const Op0_t &Op0) {
|
|
return m_CombineOr(m_ZExt(Op0), m_Trunc(Op0), Op0);
|
|
}
|
|
|
|
template <unsigned Opcode, typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_match<Opcode, Op0_t, Op1_t> m_Binary(const Op0_t &Op0,
|
|
const Op1_t &Op1) {
|
|
return AllRecipe_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <unsigned Opcode, typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>
|
|
m_c_Binary(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_match<Instruction::Add, Op0_t, Op1_t> m_Add(const Op0_t &Op0,
|
|
const Op1_t &Op1) {
|
|
return m_Binary<Instruction::Add, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_commutative_match<Instruction::Add, Op0_t, Op1_t>
|
|
m_c_Add(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_c_Binary<Instruction::Add, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_match<Instruction::Sub, Op0_t, Op1_t> m_Sub(const Op0_t &Op0,
|
|
const Op1_t &Op1) {
|
|
return m_Binary<Instruction::Sub, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_match<Instruction::Mul, Op0_t, Op1_t> m_Mul(const Op0_t &Op0,
|
|
const Op1_t &Op1) {
|
|
return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_commutative_match<Instruction::Mul, Op0_t, Op1_t>
|
|
m_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_c_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_match<Instruction::FMul, Op0_t, Op1_t>
|
|
m_FMul(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_Binary<Instruction::FMul, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_match<Instruction::FAdd, Op0_t, Op1_t>
|
|
m_FAdd(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_Binary<Instruction::FAdd, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_commutative_match<Instruction::FAdd, Op0_t, Op1_t>
|
|
m_c_FAdd(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_c_Binary<Instruction::FAdd, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_match<Instruction::UDiv, Op0_t, Op1_t>
|
|
m_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_Binary<Instruction::UDiv, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
/// Match a binary AND operation.
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_commutative_match<Instruction::And, Op0_t, Op1_t>
|
|
m_c_BinaryAnd(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_c_Binary<Instruction::And, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
/// Match a binary OR operation. Note that while conceptually the operands can
|
|
/// be matched commutatively, \p Commutative defaults to false in line with the
|
|
/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
|
|
/// version of the matcher.
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_match<Instruction::Or, Op0_t, Op1_t>
|
|
m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline AllRecipe_commutative_match<Instruction::Or, Op0_t, Op1_t>
|
|
m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_c_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
/// Cmp_match is a variant of BinaryRecipe_match that also binds the comparison
|
|
/// predicate. Opcodes must either be Instruction::ICmp or Instruction::FCmp, or
|
|
/// both.
|
|
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
|
|
struct Cmp_match {
|
|
static_assert((sizeof...(Opcodes) == 1 || sizeof...(Opcodes) == 2) &&
|
|
"Expected one or two opcodes");
|
|
static_assert(
|
|
((Opcodes == Instruction::ICmp || Opcodes == Instruction::FCmp) && ...) &&
|
|
"Expected a compare instruction opcode");
|
|
|
|
CmpPredicate *Predicate = nullptr;
|
|
Op0_t Op0;
|
|
Op1_t Op1;
|
|
|
|
Cmp_match(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1)
|
|
: Predicate(&Pred), Op0(Op0), Op1(Op1) {}
|
|
Cmp_match(const Op0_t &Op0, const Op1_t &Op1) : Op0(Op0), Op1(Op1) {}
|
|
|
|
bool match(const VPValue *V) const {
|
|
auto *DefR = V->getDefiningRecipe();
|
|
return DefR && match(DefR);
|
|
}
|
|
|
|
bool match(const VPRecipeBase *V) const {
|
|
if ((m_Binary<Opcodes>(Op0, Op1).match(V) || ...)) {
|
|
if (Predicate)
|
|
*Predicate = cast<VPRecipeWithIRFlags>(V)->getPredicate();
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
};
|
|
|
|
/// SpecificCmp_match is a variant of Cmp_match that matches the comparison
|
|
/// predicate, instead of binding it.
|
|
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
|
|
struct SpecificCmp_match {
|
|
const CmpPredicate Predicate;
|
|
Op0_t Op0;
|
|
Op1_t Op1;
|
|
|
|
SpecificCmp_match(CmpPredicate Pred, const Op0_t &LHS, const Op1_t &RHS)
|
|
: Predicate(Pred), Op0(LHS), Op1(RHS) {}
|
|
|
|
bool match(const VPValue *V) const {
|
|
auto *DefR = V->getDefiningRecipe();
|
|
return DefR && match(DefR);
|
|
}
|
|
|
|
bool match(const VPRecipeBase *V) const {
|
|
CmpPredicate CurrentPred;
|
|
return Cmp_match<Op0_t, Op1_t, Opcodes...>(CurrentPred, Op0, Op1)
|
|
.match(V) &&
|
|
CmpPredicate::getMatching(CurrentPred, Predicate);
|
|
}
|
|
};
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp> m_ICmp(const Op0_t &Op0,
|
|
const Op1_t &Op1) {
|
|
return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp>
|
|
m_ICmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
|
|
return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Pred, Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>
|
|
m_SpecificICmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
|
|
return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>(MatchPred, Op0,
|
|
Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
|
|
m_Cmp(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(Op0,
|
|
Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
|
|
m_Cmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
|
|
return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
|
|
Pred, Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
|
|
m_SpecificCmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
|
|
return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
|
|
MatchPred, Op0, Op1);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline auto m_GetElementPtr(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_CombineOr(
|
|
Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::GetElementPtr,
|
|
/*Commutative*/ false, VPReplicateRecipe, VPWidenGEPRecipe>(
|
|
Op0, Op1),
|
|
VPInstruction_match<VPInstruction::PtrAdd, Op0_t, Op1_t>(Op0, Op1),
|
|
VPInstruction_match<VPInstruction::WidePtrAdd, Op0_t, Op1_t>(Op0, Op1));
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t, typename Op2_t>
|
|
inline AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>
|
|
m_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
|
|
return AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>(
|
|
{Op0, Op1, Op2});
|
|
}
|
|
|
|
template <typename Op0_t> inline auto m_Not(const Op0_t &Op0) {
|
|
return m_CombineOr(m_VPInstruction<VPInstruction::Not>(Op0),
|
|
m_c_Binary<Instruction::Xor>(m_AllOnes(), Op0));
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t, typename Op2_t>
|
|
inline auto m_c_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
|
|
return m_CombineOr(m_Select(Op0, Op1, Op2), m_Select(m_Not(Op0), Op2, Op1));
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline auto m_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_CombineOr(
|
|
m_VPInstruction<VPInstruction::LogicalAnd, Op0_t, Op1_t>(Op0, Op1),
|
|
m_Select(Op0, Op1, m_False()));
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline auto m_c_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_CombineOr(
|
|
m_c_VPInstruction<VPInstruction::LogicalAnd, Op0_t, Op1_t>(Op0, Op1),
|
|
m_c_Select(Op0, Op1, m_False()));
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline auto m_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_CombineOr(
|
|
m_c_VPInstruction<VPInstruction::LogicalOr, Op0_t, Op1_t>(Op0, Op1),
|
|
m_Select(Op0, m_True(), Op1));
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline auto m_c_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return m_c_Select(Op0, m_True(), Op1);
|
|
}
|
|
|
|
/// Match the canonical induction variable (IV) of any loop region.
|
|
struct canonical_iv_match {
|
|
template <typename ArgTy> bool match(const ArgTy *V) const {
|
|
const auto *RV = dyn_cast<VPRegionValue>(V);
|
|
return RV && RV->getDefiningRegion()->getCanonicalIV() == RV;
|
|
}
|
|
};
|
|
|
|
inline canonical_iv_match m_CanonicalIV() { return {}; }
|
|
|
|
template <typename Op0_t, typename Op1_t, typename Op2_t>
|
|
inline auto m_ScalarIVSteps(const Op0_t &Op0, const Op1_t &Op1,
|
|
const Op2_t &Op2) {
|
|
return Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0, false,
|
|
VPScalarIVStepsRecipe>({Op0, Op1, Op2});
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t, typename Op2_t>
|
|
inline auto m_DerivedIV(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
|
|
return Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0, false,
|
|
VPDerivedIVRecipe>({Op0, Op1, Op2});
|
|
}
|
|
|
|
template <typename Addr_t, typename Mask_t> struct Load_match {
|
|
Addr_t Addr;
|
|
Mask_t Mask;
|
|
|
|
Load_match(Addr_t Addr, Mask_t Mask) : Addr(Addr), Mask(Mask) {}
|
|
|
|
template <typename OpTy> bool match(const OpTy *V) const {
|
|
auto *Load = dyn_cast<VPWidenLoadRecipe>(V);
|
|
if (!Load || !Addr.match(Load->getAddr()) || !Load->isMasked() ||
|
|
!Mask.match(Load->getMask()))
|
|
return false;
|
|
return true;
|
|
}
|
|
};
|
|
|
|
/// Match a (possibly reversed) masked load.
|
|
template <typename Addr_t, typename Mask_t>
|
|
inline Load_match<Addr_t, Mask_t> m_MaskedLoad(const Addr_t &Addr,
|
|
const Mask_t &Mask) {
|
|
return Load_match<Addr_t, Mask_t>(Addr, Mask);
|
|
}
|
|
|
|
template <typename Addr_t, typename Val_t, typename Mask_t> struct Store_match {
|
|
Addr_t Addr;
|
|
Val_t Val;
|
|
Mask_t Mask;
|
|
|
|
Store_match(Addr_t Addr, Val_t Val, Mask_t Mask)
|
|
: Addr(Addr), Val(Val), Mask(Mask) {}
|
|
|
|
template <typename OpTy> bool match(const OpTy *V) const {
|
|
auto *Store = dyn_cast<VPWidenStoreRecipe>(V);
|
|
if (!Store || !Addr.match(Store->getAddr()) ||
|
|
!Val.match(Store->getStoredValue()) || !Store->isMasked() ||
|
|
!Mask.match(Store->getMask()))
|
|
return false;
|
|
return true;
|
|
}
|
|
};
|
|
|
|
/// Match a (possibly reversed) masked store.
|
|
template <typename Addr_t, typename Val_t, typename Mask_t>
|
|
inline Store_match<Addr_t, Val_t, Mask_t>
|
|
m_MaskedStore(const Addr_t &Addr, const Val_t &Val, const Mask_t &Mask) {
|
|
return Store_match<Addr_t, Val_t, Mask_t>(Addr, Val, Mask);
|
|
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
using VectorEndPointerRecipe_match =
|
|
Recipe_match<std::tuple<Op0_t, Op1_t>, 0,
|
|
/*Commutative*/ false, VPVectorEndPointerRecipe>;
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
VectorEndPointerRecipe_match<Op0_t, Op1_t> m_VecEndPtr(const Op0_t &Op0,
|
|
const Op1_t &Op1) {
|
|
return VectorEndPointerRecipe_match<Op0_t, Op1_t>(Op0, Op1);
|
|
}
|
|
|
|
/// Match a call argument at a given argument index.
template <typename Opnd_t> struct Argument_match {
  /// Call argument index to match.
  unsigned OpI;
  /// Sub-pattern the argument operand must satisfy.
  Opnd_t Val;

  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}

  /// Returns true if \p V is a call-like recipe whose operand at index OpI
  /// matches the sub-pattern. VPWidenIntrinsicRecipe and VPWidenCallRecipe
  /// always qualify; VPReplicateRecipe and VPInstruction only when their
  /// opcode is Instruction::Call.
  template <typename OpTy> bool match(OpTy *V) const {
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (R->getOpcode() == Instruction::Call)
        return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPInstruction>(V))
      if (R->getOpcode() == Instruction::Call)
        return Val.match(R->getOperand(OpI));
    return false;
  }
};
|
|
|
|
/// Match a call argument.
|
|
template <unsigned OpI, typename Opnd_t>
|
|
inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
|
|
return Argument_match<Opnd_t>(OpI, Op);
|
|
}
|
|
|
|
/// Intrinsic matchers.
struct IntrinsicID_match {
  /// Intrinsic ID to match against.
  unsigned ID;

  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}

  /// Returns true if \p V is a call-like recipe calling intrinsic ID. Widened
  /// intrinsic/call recipes expose the ID directly; replicate recipes and
  /// VPInstruction calls carry the callee as an operand instead.
  template <typename OpTy> bool match(OpTy *V) const {
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return R->getVectorIntrinsicID() == ID;
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return R->getCalledScalarFunction()->getIntrinsicID() == ID;

    // Check that a callee operand is a live-in IR Function whose intrinsic ID
    // equals the expected one.
    auto MatchCalleeIntrinsic = [&](VPValue *CalleeOp) {
      if (!isa<VPIRValue>(CalleeOp))
        return false;
      auto *F = cast<Function>(CalleeOp->getLiveInIRValue());
      return F->getIntrinsicID() == ID;
    };
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (R->getOpcode() == Instruction::Call) {
        // The mask is always the last operand if predicated.
        return MatchCalleeIntrinsic(
            R->getOperand(R->getNumOperands() - 1 - R->isPredicated()));
      }
    if (const auto *R = dyn_cast<VPInstruction>(V))
      if (R->getOpcode() == Instruction::Call)
        // For VPInstruction calls the callee is the last operand.
        return MatchCalleeIntrinsic(R->getOperand(R->getNumOperands() - 1));
    return false;
  }
};
|
|
|
|
/// Intrinsic matches are combinations of ID matchers, and argument
/// matchers. Higher arity matcher are defined recursively in terms of and-ing
/// them with lower arity matchers. Here's some convenient typedefs for up to
/// several arguments, and more can be added as needed
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void>
struct m_Intrinsic_Ty;
// One argument: intrinsic-ID matcher and-ed with a matcher for argument 0.
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
// Two arguments: the one-argument matcher and-ed with argument 1.
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
// Three arguments: the two-argument matcher and-ed with argument 2.
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                               Argument_match<T2>>;
};
// Four arguments (primary template): the three-argument matcher and-ed with
// argument 3.
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                               Argument_match<T3>>;
};
|
|
|
|
/// Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_VPValue(X), ...)
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
  return IntrinsicID_match(IntrID);
}

/// Match intrinsic calls with a runtime intrinsic ID.
inline IntrinsicID_match m_Intrinsic(Intrinsic::ID IntrID) {
  return IntrinsicID_match(IntrID);
}

/// Match an intrinsic call whose argument 0 matches \p Op0.
template <Intrinsic::ID IntrID, typename T0>
inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
}

/// Match an intrinsic call whose arguments 0 and 1 match \p Op0 and \p Op1.
template <Intrinsic::ID IntrID, typename T0, typename T1>
inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
                                                       const T1 &Op1) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
}

/// Match an intrinsic call whose arguments 0..2 match \p Op0..\p Op2.
template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
}

/// Match an intrinsic call whose arguments 0..3 match \p Op0..\p Op3.
template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
}
|
|
|
|
/// Like m_Intrinsic, but additionally requires the matched recipe to be a
/// VPWidenIntrinsicRecipe.
template <Intrinsic::ID IntrID, typename... T>
inline auto m_WidenIntrinsic(const T &...Ops) {
  return m_Isa<VPWidenIntrinsicRecipe>(m_Intrinsic<IntrID>(Ops...));
}
|
|
|
|
/// Match a live-in value, i.e. a VPIRValue or a VPSymbolicValue.
inline auto m_LiveIn() { return m_Isa<VPIRValue, VPSymbolicValue>(); }
|
|
|
|
/// Match a GEP recipe (VPWidenGEPRecipe, VPInstruction, or VPReplicateRecipe)
/// and bind the source element type and operands.
struct GetElementPtr_match {
  /// Out-parameter: receives the matched GEP's source element type.
  Type *&SourceElementType;
  /// Out-parameter: receives all operands of the matched recipe.
  ArrayRef<VPValue *> &Operands;

  GetElementPtr_match(Type *&SourceElementType, ArrayRef<VPValue *> &Operands)
      : SourceElementType(SourceElementType), Operands(Operands) {}

  template <typename ITy> bool match(ITy *V) const {
    return matchRecipeAndBind<VPWidenGEPRecipe>(V) ||
           matchRecipeAndBind<VPInstruction>(V) ||
           matchRecipeAndBind<VPReplicateRecipe>(V);
  }

private:
  /// Try to match \p V as \p RecipeTy; on success bind SourceElementType and
  /// Operands and return true. The source element type comes from the recipe
  /// itself (VPWidenGEPRecipe), from the underlying GetElementPtrInst
  /// (recipes with a GetElementPtr opcode), or is i8 for
  /// VPInstruction::PtrAdd.
  template <typename RecipeTy> bool matchRecipeAndBind(const VPValue *V) const {
    auto *DefR = dyn_cast<RecipeTy>(V);
    if (!DefR)
      return false;

    // The first branch is selected at compile time for VPWidenGEPRecipe; the
    // runtime GetElementPtr-opcode branch below then only exists in the
    // VPInstruction and VPReplicateRecipe instantiations.
    if constexpr (std::is_same_v<RecipeTy, VPWidenGEPRecipe>) {
      SourceElementType = DefR->getSourceElementType();
    } else if (DefR->getOpcode() == Instruction::GetElementPtr) {
      SourceElementType = cast<GetElementPtrInst>(DefR->getUnderlyingInstr())
                              ->getSourceElementType();
    } else if constexpr (std::is_same_v<RecipeTy, VPInstruction>) {
      if (DefR->getOpcode() == VPInstruction::PtrAdd) {
        // PtrAdd is a byte-offset GEP with i8 element type.
        LLVMContext &Ctx = DefR->getParent()->getPlan()->getContext();
        SourceElementType = Type::getInt8Ty(Ctx);
      } else {
        return false;
      }
    } else {
      return false;
    }

    Operands = ArrayRef<VPValue *>(DefR->op_begin(), DefR->op_end());
    return true;
  }
};
|
|
|
|
/// Match a GEP recipe with any number of operands and bind source element type
|
|
/// and operands.
|
|
inline GetElementPtr_match m_GetElementPtr(Type *&SourceElementType,
|
|
ArrayRef<VPValue *> &Operands) {
|
|
return GetElementPtr_match(SourceElementType, Operands);
|
|
}
|
|
|
|
/// Matcher that succeeds only when the value has exactly one use and the
/// wrapped sub-pattern matches it as well.
template <typename SubPattern_t> struct OneUse_match {
  SubPattern_t SubPattern;

  OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) const {
    // Guard on the use count first, preserving the original short-circuit
    // order, then defer to the sub-pattern.
    if (!V->hasOneUse())
      return false;
    return SubPattern.match(V);
  }
};
|
|
|
|
/// Wrap \p SubPattern so it only matches values with a single use.
template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
  // Construct explicitly rather than relying on the implicit conversion.
  return OneUse_match<T>(SubPattern);
}
|
|
|
|
/// Match any VPReductionPHIRecipe and bind it to \p V.
inline match_bind<VPReductionPHIRecipe>
m_ReductionPhi(VPReductionPHIRecipe *&V) {
  // Construct explicitly rather than relying on the implicit conversion.
  return match_bind<VPReductionPHIRecipe>(V);
}
|
|
|
|
template <typename Op0_t, typename Op1_t>
|
|
inline auto m_VPPhi(const Op0_t &Op0, const Op1_t &Op1) {
|
|
return Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::PHI,
|
|
/*Commutative*/ false, VPInstruction>({Op0, Op1});
|
|
}
|
|
|
|
} // namespace llvm::VPlanPatternMatch
|
|
|
|
#endif
|