Since futureWeight was removed by
145cc9db2b, there are no more calls to
weightCalcHelper(LI, start, end)
393 lines
14 KiB
C++
393 lines
14 KiB
C++
//===- CalcSpillWeights.cpp -----------------------------------------------===//
|
|
//
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
#include "llvm/CodeGen/CalcSpillWeights.h"
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
|
#include "llvm/CodeGen/LiveInterval.h"
|
|
#include "llvm/CodeGen/LiveIntervals.h"
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
|
#include "llvm/CodeGen/MachineLoopInfo.h"
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
|
#include "llvm/CodeGen/StackMaps.h"
|
|
#include "llvm/CodeGen/TargetInstrInfo.h"
|
|
#include "llvm/CodeGen/TargetRegisterInfo.h"
|
|
#include "llvm/CodeGen/TargetSubtargetInfo.h"
|
|
#include "llvm/CodeGen/VirtRegMap.h"
|
|
#include "llvm/Support/Debug.h"
|
|
#include "llvm/Support/MathExtras.h"
|
|
#include "llvm/Support/raw_ostream.h"
|
|
#include <cassert>
|
|
#include <tuple>
|
|
|
|
using namespace llvm;
|
|
|
|
#define DEBUG_TYPE "calcspillweights"
|
|
|
|
void VirtRegAuxInfo::calculateSpillWeightsAndHints() {
|
|
LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
|
|
<< "********** Function: " << MF.getName() << '\n');
|
|
|
|
MachineRegisterInfo &MRI = MF.getRegInfo();
|
|
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
|
|
Register Reg = Register::index2VirtReg(I);
|
|
if (MRI.reg_nodbg_empty(Reg))
|
|
continue;
|
|
calculateSpillWeightAndHint(LIS.getInterval(Reg));
|
|
}
|
|
}
|
|
|
|
// Return the preferred allocation register for reg, given a COPY instruction.
|
|
Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, Register Reg,
|
|
const TargetRegisterInfo &TRI,
|
|
const MachineRegisterInfo &MRI) {
|
|
unsigned Sub, HSub;
|
|
Register HReg;
|
|
if (MI->getOperand(0).getReg() == Reg) {
|
|
Sub = MI->getOperand(0).getSubReg();
|
|
HReg = MI->getOperand(1).getReg();
|
|
HSub = MI->getOperand(1).getSubReg();
|
|
} else {
|
|
Sub = MI->getOperand(1).getSubReg();
|
|
HReg = MI->getOperand(0).getReg();
|
|
HSub = MI->getOperand(0).getSubReg();
|
|
}
|
|
|
|
if (!HReg)
|
|
return 0;
|
|
|
|
if (HReg.isVirtual())
|
|
return Sub == HSub ? HReg : Register();
|
|
|
|
const TargetRegisterClass *RC = MRI.getRegClass(Reg);
|
|
MCRegister CopiedPReg = HSub ? TRI.getSubReg(HReg, HSub) : HReg.asMCReg();
|
|
if (RC->contains(CopiedPReg))
|
|
return CopiedPReg;
|
|
|
|
// Check if reg:sub matches so that a super register could be hinted.
|
|
if (Sub)
|
|
return TRI.getMatchingSuperReg(CopiedPReg, Sub, RC);
|
|
|
|
return Register();
|
|
}
|
|
|
|
// Check if all values in LI are rematerializable
//
// Every value number of LI must be defined by a trivially rematerializable
// instruction (tracing through full copies introduced by live range
// splitting), and each use of LI must still have that def's own operands
// available at the use point, so the def could be re-executed there.
bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
                                        const LiveIntervals &LIS,
                                        const VirtRegMap &VRM,
                                        const MachineRegisterInfo &MRI,
                                        const TargetInstrInfo &TII) {
  Register Reg = LI.reg();
  // The pre-splitting register this interval originated from; used below to
  // recognize copies between split siblings.
  Register Original = VRM.getOriginal(Reg);
  // Maps each value number id of LI to its (copy-traced) defining instruction.
  SmallDenseMap<unsigned, MachineInstr *> VNIDefs;
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    // Remember the value number as seen in LI; copy tracing below rebinds VNI
    // to values of other intervals, but the map must be keyed on LI's ids.
    const VNInfo *OrigVNI = VNI;
    if (VNI->isUnused())
      continue;
    // A PHI-defined value has no single defining instruction to remat.
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    while (TII.isFullCopyInstr(*MI)) {
      // The copy destination must match the interval register.
      if (MI->getOperand(0).getReg() != Reg)
        return false;

      // Get the source register.
      Reg = MI->getOperand(1).getReg();

      // If the original (pre-splitting) registers match this
      // copy came from a split.
      if (!Reg.isVirtual() || VRM.getOriginal(Reg) != Original)
        return false;

      // Follow the copy live-in value.
      const LiveInterval &SrcLI = LIS.getInterval(Reg);
      LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
      VNI = SrcQ.valueIn();
      assert(VNI && "Copy from non-existing value");
      if (VNI->isPHIDef())
        return false;
      MI = LIS.getInstructionFromIndex(VNI->def);
      assert(MI && "Dead valno in interval");
    }

    if (!TII.isReMaterializable(*MI))
      return false;

    // Record under LI's own value number id so the use scan below can find
    // the traced def for each use point.
    VNIDefs[OrigVNI->id] = MI;
  }

  // If MI has register uses, it will only be rematerializable if its uses are
  // also live at the indices it will be rematerialized at.
  for (MachineOperand &MO : MRI.reg_nodbg_operands(LI.reg())) {
    if (!MO.readsReg())
      continue;
    SlotIndex UseIdx = LIS.getInstructionIndex(*MO.getParent());
    // Look up the defining instruction for the value live at this use.
    MachineInstr *Def = VNIDefs[LI.getVNInfoAt(UseIdx)->id];
    assert(Def && "Use with no def");
    if (!allUsesAvailableAt(Def, UseIdx, LIS, MRI, TII))
      return false;
  }

  return true;
}
|
|
|
|
/// Return true if every register operand read by MI holds the same value at
/// UseIdx as it did at MI's original position, i.e. MI could safely be
/// re-executed (rematerialized) at UseIdx.
bool VirtRegAuxInfo::allUsesAvailableAt(const MachineInstr *MI,
                                        SlotIndex UseIdx,
                                        const LiveIntervals &LIS,
                                        const MachineRegisterInfo &MRI,
                                        const TargetInstrInfo &TII) {
  // Query liveness at the early-clobber register slot of each position, so
  // both indices refer to the point where uses are read.
  SlotIndex OrigIdx = LIS.getInstructionIndex(*MI).getRegSlot(true);
  UseIdx = std::max(UseIdx, UseIdx.getRegSlot(true));
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
      continue;

    // We can't remat physreg uses, unless it is a constant or target wants
    // to ignore this use.
    if (MO.getReg().isPhysical()) {
      if (MRI.isConstantPhysReg(MO.getReg()) || TII.isIgnorableUse(MO))
        continue;
      return false;
    }

    const LiveInterval &li = LIS.getInterval(MO.getReg());
    const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
    // Operand not live at the original def point; nothing to compare.
    if (!OVNI)
      continue;

    // Don't allow rematerialization immediately after the original def.
    // It would be incorrect if OrigMI redefines the register.
    // See PR14098.
    if (SlotIndex::isSameInstr(OrigIdx, UseIdx))
      return false;

    // The same value number must be live at both points, otherwise the
    // operand was redefined in between.
    if (OVNI != li.getVNInfoAt(UseIdx))
      return false;

    // Check that subrange is live at UseIdx.
    if (li.hasSubRanges()) {
      const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
      unsigned SubReg = MO.getSubReg();
      // Lanes actually read by this operand; all of them must be live.
      LaneBitmask LM = SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
                              : MRI.getMaxLaneMaskForVReg(MO.getReg());
      for (const LiveInterval::SubRange &SR : li.subranges()) {
        if ((SR.LaneMask & LM).none())
          continue;
        if (!SR.liveAt(UseIdx))
          return false;
        // Early exit if all used lanes are checked. No need to continue.
        LM &= ~SR.LaneMask;
        if (LM.none())
          break;
      }
    }
  }
  return true;
}
|
|
|
|
/// Return true if LI's register appears as a variadic operand of any
/// STATEPOINT instruction, i.e. at or past the statepoint's variable-argument
/// index.
bool VirtRegAuxInfo::isLiveAtStatepointVarArg(LiveInterval &LI) {
  for (MachineOperand &MO : VRM.getRegInfo().reg_operands(LI.reg())) {
    MachineInstr *User = MO.getParent();
    if (User->getOpcode() != TargetOpcode::STATEPOINT)
      continue;
    if (StatepointOpers(User).getVarIdx() <= MO.getOperandNo())
      return true;
  }
  return false;
}
|
|
|
|
/// Compute the spill weight for LI and store it on the interval; allocation
/// hints are updated as a side effect of weightCalcHelper.
void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
  const float NewWeight = weightCalcHelper(LI);
  // A negative result marks the interval as unspillable; leave its weight
  // untouched in that case.
  if (NewWeight < 0)
    return;
  LI.setWeight(NewWeight);
}
|
|
|
|
/// Return true if LI's register is used by an INLINEASM instruction whose
/// register operand may be folded into a memory operand.
static bool canMemFoldInlineAsm(LiveInterval &LI,
                                const MachineRegisterInfo &MRI) {
  return any_of(MRI.reg_operands(LI.reg()), [](const MachineOperand &MO) {
    const MachineInstr *User = MO.getParent();
    return User->isInlineAsm() &&
           User->mayFoldInlineAsmRegOp(User->getOperandNo(&MO));
  });
}
|
|
|
|
/// Compute the spill weight of LI and record register allocation hints
/// derived from the copies that use it.
///
/// \returns The normalized spill weight, or a negative value if the interval
///          is (or becomes) unspillable. Side effects: may call
///          LI.markNotSpillable() and updates MRI's allocation hints.
float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  // Cached block of the previous instruction, to avoid re-querying loop info.
  MachineBasicBlock *MBB = nullptr;
  float TotalWeight = 0;
  unsigned NumInstr = 0; // Number of instructions using LI
  SmallPtrSet<MachineInstr *, 8> Visited;

  std::pair<unsigned, Register> TargetHint = MRI.getRegAllocationHint(LI.reg());

  if (LI.isSpillable()) {
    Register Reg = LI.reg();
    Register Original = VRM.getOriginal(Reg);
    const LiveInterval &OrigInt = LIS.getInterval(Original);
    // li comes from a split of OrigInt. If OrigInt was marked
    // as not spillable, make sure the new interval is marked
    // as not spillable as well.
    if (!OrigInt.isSpillable())
      LI.markNotSpillable();
  }

  // Don't recompute spill weight for an unspillable register.
  bool IsSpillable = LI.isSpillable();

  // CopyHint is a sortable hint derived from a COPY instruction.
  struct CopyHint {
    Register Reg;
    float Weight;
    bool IsCSR;
    CopyHint(Register R, float W, bool IsCSR)
        : Reg(R), Weight(W), IsCSR(IsCSR) {}
    // Orders hints best-first: physregs, then higher weight, then non-CSR.
    bool operator<(const CopyHint &Rhs) const {
      // Always prefer any physreg hint.
      if (Reg.isPhysical() != Rhs.Reg.isPhysical())
        return Reg.isPhysical();
      if (Weight != Rhs.Weight)
        return (Weight > Rhs.Weight);
      // Prefer non-CSR to CSR.
      if (Reg.isPhysical() && IsCSR != Rhs.IsCSR)
        return !IsCSR;
      return Reg.id() < Rhs.Reg.id(); // Tie-breaker.
    }
  };

  bool IsExiting = false;
  // Accumulated hint weight per candidate register.
  SmallDenseMap<Register, float, 8> Hint;
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           I = MRI.reg_instr_nodbg_begin(LI.reg()),
           E = MRI.reg_instr_nodbg_end();
       I != E;) {
    // Advance before processing: the body may not invalidate the iterator.
    MachineInstr *MI = &*(I++);

    NumInstr++;
    // Identity copies (same reg and subreg on both sides) contribute nothing.
    bool identityCopy = false;
    auto DestSrc = TII.isCopyInstr(*MI);
    if (DestSrc) {
      const MachineOperand *DestRegOp = DestSrc->Destination;
      const MachineOperand *SrcRegOp = DestSrc->Source;
      identityCopy = DestRegOp->getReg() == SrcRegOp->getReg() &&
                     DestRegOp->getSubReg() == SrcRegOp->getSubReg();
    }

    if (identityCopy || MI->isImplicitDef())
      continue;
    // An instruction may use LI in several operands; count it only once.
    if (!Visited.insert(MI).second)
      continue;

    // For terminators that produce values, ask the backend if the register is
    // not spillable.
    if (TII.isUnspillableTerminator(MI) &&
        MI->definesRegister(LI.reg(), /*TRI=*/nullptr)) {
      LI.markNotSpillable();
      return -1.0f;
    }

    // Force Weight onto the stack so that x86 doesn't add hidden precision.
    stack_float_t Weight = 1.0f;
    if (IsSpillable) {
      // Get loop info for mi.
      if (MI->getParent() != MBB) {
        MBB = MI->getParent();
        const MachineLoop *Loop = Loops.getLoopFor(MBB);
        IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
      }

      // Calculate instr weight.
      bool Reads, Writes;
      std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
      Weight = LiveIntervals::getSpillWeight(Writes, Reads, &MBFI, *MI, PSI);

      // Give extra weight to what looks like a loop induction variable update.
      if (Writes && IsExiting && LIS.isLiveOutOfMBB(LI, MBB))
        Weight *= 3;

      TotalWeight += Weight;
    }

    // Get allocation hints from copies.
    if (!TII.isCopyInstr(*MI))
      continue;
    Register HintReg = copyHint(MI, LI.reg(), TRI, MRI);
    if (HintReg && (HintReg.isVirtual() || MRI.isAllocatable(HintReg)))
      Hint[HintReg] += Weight;
  }

  // Pass all the sorted copy hints to mri.
  if (Hint.size()) {
    // Remove a generic hint if previously added by target.
    if (TargetHint.first == 0 && TargetHint.second)
      MRI.clearSimpleHint(LI.reg());

    // Don't add the target-type hint again.
    Register SkipReg = TargetHint.first != 0 ? TargetHint.second : Register();
    SmallVector<CopyHint, 8> RegHints;
    for (const auto &[Reg, Weight] : Hint) {
      if (Reg != SkipReg)
        RegHints.emplace_back(
            Reg, Weight,
            Reg.isPhysical() ? TRI.isCalleeSavedPhysReg(Reg, MF) : false);
    }
    // Sort best-first (see CopyHint::operator<) and register in that order.
    sort(RegHints);
    for (const auto &[Reg, _, __] : RegHints)
      MRI.addRegAllocationHint(LI.reg(), Reg);

    // Weakly boost the spill weight of hinted registers.
    TotalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!IsSpillable)
    return -1.0;

  // Mark li as unspillable if all live ranges are tiny and the interval
  // is not live at any reg mask. If the interval is live at a reg mask
  // spilling may be required. If li is live as use in statepoint instruction
  // spilling may be required due to if we mark interval with use in statepoint
  // as not spillable we are risky to end up with no register to allocate.
  // At the same time STATEPOINT instruction is perfectly fine to have this
  // operand on stack, so spilling such interval and folding its load from stack
  // into instruction itself makes perfect sense.
  if (LI.isZeroLength(LIS.getSlotIndexes()) &&
      !LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
      !isLiveAtStatepointVarArg(LI) && !canMemFoldInlineAsm(LI, MRI)) {
    LI.markNotSpillable();
    return -1.0;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(LI, LIS, VRM, MRI, *MF.getSubtarget().getInstrInfo()))
    TotalWeight *= 0.5F;

  // Finally, we scale the weight by the scale factor of register class.
  const TargetRegisterClass *RC = MRI.getRegClass(LI.reg());
  TotalWeight *= TRI.getSpillWeightScaleFactor(RC);

  return normalize(TotalWeight, LI.getSize(), NumInstr);
}
|