[MemProf] Skip handling of memprof records for non-prevailing functions (#185963)
When building the combined summary index during a thin link, we already performed a memory optimization for non-prevailing copies of a function by not recording their allocation and callsite info in the associated function summary. We can save on the thin link time as well by avoiding building the memprof summary structures just to throw them away later in the non-prevailing case. The reason we were eagerly building these structures is that the memprof summaries *precede* the corresponding function summary record, and we don't know whether this is the prevailing copy of the function until we parse the function summary record. To facilitate the new handling, we emit the memprof summary records *after* the corresponding function summary record. The bitcode summary version is bumped, and the reader is changed to support both versions, for backwards compatibility. Note that there is already a memprof test that tests an older record type and will also test reading of the legacy version of the ordering (llvm/test/ThinLTO/X86/memprof-old-alloc-context-summary.ll). To make the new handling even more efficient, the lookup/insertion of stack IDs in the combined summary index and the caching of their corresponding stack index in the StackIdToIndex map is made lazy. This resulted in a 27% reduction in thin link time for a large target (21% without the lazy insertion change).
This commit is contained in:
@@ -1102,10 +1102,10 @@ public:
|
||||
return *Callsites;
|
||||
}
|
||||
|
||||
void addCallsite(CallsiteInfo &Callsite) {
|
||||
void addCallsite(CallsiteInfo &&Callsite) {
|
||||
if (!Callsites)
|
||||
Callsites = std::make_unique<CallsitesTy>();
|
||||
Callsites->push_back(Callsite);
|
||||
Callsites->push_back(std::move(Callsite));
|
||||
}
|
||||
|
||||
ArrayRef<AllocInfo> allocs() const {
|
||||
@@ -1114,6 +1114,12 @@ public:
|
||||
return {};
|
||||
}
|
||||
|
||||
void addAlloc(AllocInfo &&Alloc) {
|
||||
if (!Allocs)
|
||||
Allocs = std::make_unique<AllocsTy>();
|
||||
Allocs->push_back(std::move(Alloc));
|
||||
}
|
||||
|
||||
AllocsTy &mutableAllocs() {
|
||||
assert(Allocs);
|
||||
return *Allocs;
|
||||
@@ -1574,7 +1580,7 @@ public:
|
||||
// in the way some records are interpreted, like flags for instance.
|
||||
// Note that incrementing this may require changes in both BitcodeReader.cpp
|
||||
// and BitcodeWriter.cpp.
|
||||
static constexpr uint64_t BitcodeSummaryVersion = 12;
|
||||
static constexpr uint64_t BitcodeSummaryVersion = 13;
|
||||
|
||||
// Regular LTO module name for ASM writer
|
||||
static constexpr const char *getRegularLTOModuleName() {
|
||||
|
||||
@@ -985,8 +985,8 @@ class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
|
||||
std::vector<uint64_t> RadixArray;
|
||||
|
||||
/// Map from the module's stack id index to the index in the
|
||||
/// ModuleSummaryIndex's StackIds vector. Populated when the STACK_IDS record
|
||||
/// is processed and used to avoid repeated hash lookups.
|
||||
/// ModuleSummaryIndex's StackIds vector. Populated lazily from the StackIds
|
||||
/// list and used to avoid repeated hash lookups.
|
||||
std::vector<unsigned> StackIdToIndex;
|
||||
|
||||
public:
|
||||
@@ -1018,6 +1018,19 @@ private:
|
||||
SmallVector<unsigned> parseAllocInfoContext(ArrayRef<uint64_t> Record,
|
||||
unsigned &I);
|
||||
|
||||
// Mark uninitialized stack ID mappings for lazy population.
|
||||
static constexpr unsigned UninitializedStackIdIndex =
|
||||
std::numeric_limits<unsigned>::max();
|
||||
|
||||
unsigned getStackIdIndex(unsigned LocalIndex) {
|
||||
unsigned &Index = StackIdToIndex[LocalIndex];
|
||||
// Add the stack id to the ModuleSummaryIndex map only when first requested
|
||||
// and cache the result in the local StackIdToIndex map.
|
||||
if (Index == UninitializedStackIdIndex)
|
||||
Index = TheIndex.addOrGetStackIdIndex(StackIds[LocalIndex]);
|
||||
return Index;
|
||||
}
|
||||
|
||||
template <bool AllowNullValueInfo = false>
|
||||
std::pair<ValueInfo, GlobalValue::GUID>
|
||||
getValueInfoFromValueId(unsigned ValueId);
|
||||
@@ -7650,7 +7663,7 @@ SmallVector<unsigned> ModuleSummaryIndexBitcodeReader::parseAllocInfoContext(
|
||||
StackIdList.reserve(NumStackEntries);
|
||||
for (unsigned J = 0; J < NumStackEntries; J++) {
|
||||
assert(Record[I] < StackIds.size());
|
||||
StackIdList.push_back(StackIdToIndex[Record[I++]]);
|
||||
StackIdList.push_back(getStackIdIndex(Record[I++]));
|
||||
}
|
||||
} else {
|
||||
unsigned RadixIndex = Record[I++];
|
||||
@@ -7673,7 +7686,7 @@ SmallVector<unsigned> ModuleSummaryIndexBitcodeReader::parseAllocInfoContext(
|
||||
assert(static_cast<std::make_signed_t<unsigned>>(Elem) >= 0);
|
||||
}
|
||||
RadixIndex++;
|
||||
StackIdList.push_back(StackIdToIndex[Elem]);
|
||||
StackIdList.push_back(getStackIdIndex(Elem));
|
||||
}
|
||||
}
|
||||
return StackIdList;
|
||||
@@ -7715,6 +7728,9 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
}
|
||||
const uint64_t Version = Record[0];
|
||||
const bool IsOldProfileFormat = Version == 1;
|
||||
// Starting with bitcode summary version 13, MemProf records follow the
|
||||
// corresponding function summary.
|
||||
const bool MemProfAfterFunctionSummary = Version >= 13;
|
||||
if (Version < 1 || Version > ModuleSummaryIndex::BitcodeSummaryVersion)
|
||||
return error("Invalid summary version " + Twine(Version) +
|
||||
". Version should be in the range [1-" +
|
||||
@@ -7727,6 +7743,15 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
GlobalValueSummary *LastSeenSummary = nullptr;
|
||||
GlobalValue::GUID LastSeenGUID = 0;
|
||||
|
||||
// Track the most recent function summary if it was prevailing, and while we
|
||||
// are not done processing any subsequent memprof records. Starting with
|
||||
// summary version 13 (tracked by MemProfAfterFunctionSummary), MemProf
|
||||
// records follow the function summary and we skip processing them when the
|
||||
// summary is not prevailing. Note that when reading a combined index we don't
|
||||
// know what is prevailing so this should always be set in the new format when
|
||||
// we encounter MemProf records.
|
||||
FunctionSummary *CurrentPrevailingFS = nullptr;
|
||||
|
||||
// We can expect to see any number of type ID information records before
|
||||
// each function summary record; these variables store the information
|
||||
// collected so far so that it can be used to create the summary object.
|
||||
@@ -7769,7 +7794,9 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
Expected<unsigned> MaybeBitCode = Stream.readRecord(Entry.ID, Record);
|
||||
if (!MaybeBitCode)
|
||||
return MaybeBitCode.takeError();
|
||||
switch (unsigned BitCode = MaybeBitCode.get()) {
|
||||
unsigned BitCode = MaybeBitCode.get();
|
||||
|
||||
switch (BitCode) {
|
||||
default: // Default behavior: ignore.
|
||||
break;
|
||||
case bitc::FS_FLAGS: { // [flags]
|
||||
@@ -7841,16 +7868,26 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex),
|
||||
IsOldProfileFormat, HasProfile, HasRelBF);
|
||||
setSpecialRefs(Refs, NumRORefs, NumWORefs);
|
||||
auto VIAndOriginalGUID = getValueInfoFromValueId(ValueID);
|
||||
// In order to save memory, only record the memprof summaries if this is
|
||||
// the prevailing copy of a symbol. The linker doesn't resolve local
|
||||
// linkage values so don't check whether those are prevailing.
|
||||
auto [VI, GUID] = getValueInfoFromValueId(ValueID);
|
||||
|
||||
// The linker doesn't resolve local linkage values so don't check whether
|
||||
// those are prevailing (set IsPrevailingSym so they are always processed
|
||||
// and kept).
|
||||
auto LT = (GlobalValue::LinkageTypes)Flags.Linkage;
|
||||
if (IsPrevailing && !GlobalValue::isLocalLinkage(LT) &&
|
||||
!IsPrevailing(VIAndOriginalGUID.first.getGUID())) {
|
||||
bool IsPrevailingSym = !IsPrevailing || GlobalValue::isLocalLinkage(LT) ||
|
||||
IsPrevailing(VI.getGUID());
|
||||
|
||||
// If this is not the prevailing copy, and the records are in the "old"
|
||||
// order (preceding), clear them now. They should already be empty in
|
||||
// the new order (following), as they are processed or skipped immediately
|
||||
// when they follow the summary.
|
||||
assert(!MemProfAfterFunctionSummary ||
|
||||
(PendingCallsites.empty() && PendingAllocs.empty()));
|
||||
if (!IsPrevailingSym && !MemProfAfterFunctionSummary) {
|
||||
PendingCallsites.clear();
|
||||
PendingAllocs.clear();
|
||||
}
|
||||
|
||||
auto FS = std::make_unique<FunctionSummary>(
|
||||
Flags, InstCount, getDecodedFFlags(RawFunFlags), std::move(Refs),
|
||||
std::move(Calls), std::move(PendingTypeTests),
|
||||
@@ -7861,9 +7898,16 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
std::move(PendingParamAccesses), std::move(PendingCallsites),
|
||||
std::move(PendingAllocs));
|
||||
FS->setModulePath(getThisModule()->first());
|
||||
FS->setOriginalName(std::get<1>(VIAndOriginalGUID));
|
||||
TheIndex.addGlobalValueSummary(std::get<0>(VIAndOriginalGUID),
|
||||
std::move(FS));
|
||||
FS->setOriginalName(GUID);
|
||||
// Set CurrentPrevailingFS only if prevailing, so subsequent MemProf
|
||||
// records are attached (new order) or skipped.
|
||||
if (MemProfAfterFunctionSummary) {
|
||||
if (IsPrevailingSym)
|
||||
CurrentPrevailingFS = FS.get();
|
||||
else
|
||||
CurrentPrevailingFS = nullptr;
|
||||
}
|
||||
TheIndex.addGlobalValueSummary(VI, std::move(FS));
|
||||
break;
|
||||
}
|
||||
// FS_ALIAS: [valueid, flags, valueid]
|
||||
@@ -8005,6 +8049,8 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
std::move(PendingParamAccesses), std::move(PendingCallsites),
|
||||
std::move(PendingAllocs));
|
||||
LastSeenSummary = FS.get();
|
||||
if (MemProfAfterFunctionSummary)
|
||||
CurrentPrevailingFS = FS.get();
|
||||
LastSeenGUID = VI.getGUID();
|
||||
FS->setModulePath(ModuleIdMap[ModuleId]);
|
||||
TheIndex.addGlobalValueSummary(VI, std::move(FS));
|
||||
@@ -8149,9 +8195,8 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
StackIds.push_back(*R << 32 | *(R + 1));
|
||||
}
|
||||
assert(StackIdToIndex.empty());
|
||||
StackIdToIndex.reserve(StackIds.size());
|
||||
for (uint64_t StackId : StackIds)
|
||||
StackIdToIndex.push_back(TheIndex.addOrGetStackIdIndex(StackId));
|
||||
// Initialize with a marker to support lazy population.
|
||||
StackIdToIndex.resize(StackIds.size(), UninitializedStackIdIndex);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -8161,18 +8206,29 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
}
|
||||
|
||||
case bitc::FS_PERMODULE_CALLSITE_INFO: {
|
||||
// If they are in the new order (following), they are skipped when they
|
||||
// follow a non-prevailing summary (CurrentPrevailingFS will be null).
|
||||
if (MemProfAfterFunctionSummary && !CurrentPrevailingFS)
|
||||
break;
|
||||
unsigned ValueID = Record[0];
|
||||
SmallVector<unsigned> StackIdList;
|
||||
for (uint64_t R : drop_begin(Record)) {
|
||||
assert(R < StackIds.size());
|
||||
StackIdList.push_back(StackIdToIndex[R]);
|
||||
StackIdList.push_back(getStackIdIndex(R));
|
||||
}
|
||||
ValueInfo VI = std::get<0>(getValueInfoFromValueId(ValueID));
|
||||
PendingCallsites.push_back(CallsiteInfo({VI, std::move(StackIdList)}));
|
||||
if (MemProfAfterFunctionSummary)
|
||||
CurrentPrevailingFS->addCallsite(
|
||||
CallsiteInfo({VI, std::move(StackIdList)}));
|
||||
else
|
||||
PendingCallsites.push_back(CallsiteInfo({VI, std::move(StackIdList)}));
|
||||
break;
|
||||
}
|
||||
|
||||
case bitc::FS_COMBINED_CALLSITE_INFO: {
|
||||
// In the combined index case we don't have a prevailing check,
|
||||
// so we should always have a CurrentPrevailingFS.
|
||||
assert(!MemProfAfterFunctionSummary || CurrentPrevailingFS);
|
||||
auto RecordIter = Record.begin();
|
||||
unsigned ValueID = *RecordIter++;
|
||||
unsigned NumStackIds = *RecordIter++;
|
||||
@@ -8181,19 +8237,27 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
SmallVector<unsigned> StackIdList;
|
||||
for (unsigned J = 0; J < NumStackIds; J++) {
|
||||
assert(*RecordIter < StackIds.size());
|
||||
StackIdList.push_back(StackIdToIndex[*RecordIter++]);
|
||||
StackIdList.push_back(getStackIdIndex(*RecordIter++));
|
||||
}
|
||||
SmallVector<unsigned> Versions;
|
||||
for (unsigned J = 0; J < NumVersions; J++)
|
||||
Versions.push_back(*RecordIter++);
|
||||
ValueInfo VI = std::get<0>(
|
||||
getValueInfoFromValueId</*AllowNullValueInfo*/ true>(ValueID));
|
||||
PendingCallsites.push_back(
|
||||
CallsiteInfo({VI, std::move(Versions), std::move(StackIdList)}));
|
||||
if (MemProfAfterFunctionSummary)
|
||||
CurrentPrevailingFS->addCallsite(
|
||||
CallsiteInfo({VI, std::move(Versions), std::move(StackIdList)}));
|
||||
else
|
||||
PendingCallsites.push_back(
|
||||
CallsiteInfo({VI, std::move(Versions), std::move(StackIdList)}));
|
||||
break;
|
||||
}
|
||||
|
||||
case bitc::FS_ALLOC_CONTEXT_IDS: {
|
||||
// If they are in the new order (following), they are skipped when they
|
||||
// follow a non-prevailing summary (CurrentPrevailingFS will be null).
|
||||
if (MemProfAfterFunctionSummary && !CurrentPrevailingFS)
|
||||
break;
|
||||
// This is an array of 32-bit fixed-width values, holding each 64-bit
|
||||
// context id as a pair of adjacent (most significant first) 32-bit words.
|
||||
assert(Record.size() % 2 == 0);
|
||||
@@ -8204,6 +8268,12 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
}
|
||||
|
||||
case bitc::FS_PERMODULE_ALLOC_INFO: {
|
||||
// If they are in the new order (following), they are skipped when they
|
||||
// follow a non-prevailing summary (CurrentPrevailingFS will be null).
|
||||
if (MemProfAfterFunctionSummary && !CurrentPrevailingFS) {
|
||||
PendingContextIds.clear();
|
||||
break;
|
||||
}
|
||||
unsigned I = 0;
|
||||
std::vector<MIBInfo> MIBs;
|
||||
unsigned NumMIBs = 0;
|
||||
@@ -8256,16 +8326,24 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
}
|
||||
PendingContextIds.clear();
|
||||
}
|
||||
PendingAllocs.push_back(AllocInfo(std::move(MIBs)));
|
||||
AllocInfo AI(std::move(MIBs));
|
||||
if (!AllContextSizes.empty()) {
|
||||
assert(PendingAllocs.back().MIBs.size() == AllContextSizes.size());
|
||||
PendingAllocs.back().ContextSizeInfos = std::move(AllContextSizes);
|
||||
assert(AI.MIBs.size() == AllContextSizes.size());
|
||||
AI.ContextSizeInfos = std::move(AllContextSizes);
|
||||
}
|
||||
|
||||
if (MemProfAfterFunctionSummary)
|
||||
CurrentPrevailingFS->addAlloc(std::move(AI));
|
||||
else
|
||||
PendingAllocs.push_back(std::move(AI));
|
||||
break;
|
||||
}
|
||||
|
||||
case bitc::FS_COMBINED_ALLOC_INFO:
|
||||
case bitc::FS_COMBINED_ALLOC_INFO_NO_CONTEXT: {
|
||||
// In the combined index case we don't have a prevailing check,
|
||||
// so we should always have a CurrentPrevailingFS.
|
||||
assert(!MemProfAfterFunctionSummary || CurrentPrevailingFS);
|
||||
unsigned I = 0;
|
||||
std::vector<MIBInfo> MIBs;
|
||||
unsigned NumMIBs = Record[I++];
|
||||
@@ -8284,7 +8362,11 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
|
||||
for (unsigned J = 0; J < NumVersions; J++)
|
||||
Versions.push_back(Record[I++]);
|
||||
assert(I == Record.size());
|
||||
PendingAllocs.push_back(AllocInfo(std::move(Versions), std::move(MIBs)));
|
||||
AllocInfo AI(std::move(Versions), std::move(MIBs));
|
||||
if (MemProfAfterFunctionSummary)
|
||||
CurrentPrevailingFS->addAlloc(std::move(AI));
|
||||
else
|
||||
PendingAllocs.push_back(std::move(AI));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4559,13 +4559,6 @@ void ModuleBitcodeWriterBase::writePerModuleFunctionSummaryRecord(
|
||||
return {VE.getValueID(VI.getValue())};
|
||||
});
|
||||
|
||||
writeFunctionHeapProfileRecords(
|
||||
Stream, FS, CallsiteAbbrev, AllocAbbrev, ContextIdAbbvId,
|
||||
/*PerModule*/ true,
|
||||
/*GetValueId*/ [&](const ValueInfo &VI) { return getValueId(VI); },
|
||||
/*GetStackIndex*/ [&](unsigned I) { return I; },
|
||||
/*WriteContextSizeInfoIndex*/ true, CallStackPos, CallStackCount);
|
||||
|
||||
auto SpecialRefCnts = FS->specialRefCounts();
|
||||
NameVals.push_back(getEncodedGVSummaryFlags(FS->flags()));
|
||||
NameVals.push_back(FS->instCount());
|
||||
@@ -4585,6 +4578,13 @@ void ModuleBitcodeWriterBase::writePerModuleFunctionSummaryRecord(
|
||||
// Emit the finished record.
|
||||
Stream.EmitRecord(bitc::FS_PERMODULE_PROFILE, NameVals, FSCallsProfileAbbrev);
|
||||
NameVals.clear();
|
||||
|
||||
writeFunctionHeapProfileRecords(
|
||||
Stream, FS, CallsiteAbbrev, AllocAbbrev, ContextIdAbbvId,
|
||||
/*PerModule*/ true,
|
||||
/*GetValueId*/ [&](const ValueInfo &VI) { return getValueId(VI); },
|
||||
/*GetStackIndex*/ [&](unsigned I) { return I; },
|
||||
/*WriteContextSizeInfoIndex*/ true, CallStackPos, CallStackCount);
|
||||
}
|
||||
|
||||
// Collect the global value references in the given variable's initializer,
|
||||
@@ -5137,30 +5137,6 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
|
||||
writeFunctionTypeMetadataRecords(Stream, FS, GetValueId);
|
||||
getReferencedTypeIds(FS, ReferencedTypeIds);
|
||||
|
||||
writeFunctionHeapProfileRecords(
|
||||
Stream, FS, CallsiteAbbrev, AllocAbbrev, /*ContextIdAbbvId*/ 0,
|
||||
/*PerModule*/ false,
|
||||
/*GetValueId*/
|
||||
[&](const ValueInfo &VI) -> unsigned {
|
||||
std::optional<unsigned> ValueID = GetValueId(VI);
|
||||
// This can happen in shared index files for distributed ThinLTO if
|
||||
// the callee function summary is not included. Record 0 which we
|
||||
// will have to deal with conservatively when doing any kind of
|
||||
// validation in the ThinLTO backends.
|
||||
if (!ValueID)
|
||||
return 0;
|
||||
return *ValueID;
|
||||
},
|
||||
/*GetStackIndex*/
|
||||
[&](unsigned I) {
|
||||
// Get the corresponding index into the list of StackIds actually
|
||||
// being written for this combined index (which may be a subset in
|
||||
// the case of distributed indexes).
|
||||
assert(StackIdIndicesToIndex.contains(I));
|
||||
return StackIdIndicesToIndex[I];
|
||||
},
|
||||
/*WriteContextSizeInfoIndex*/ false, CallStackPos, CallStackCount);
|
||||
|
||||
NameVals.push_back(*ValueId);
|
||||
assert(ModuleIdMap.count(FS->modulePath()));
|
||||
NameVals.push_back(ModuleIdMap[FS->modulePath()]);
|
||||
@@ -5206,6 +5182,31 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
|
||||
Stream.EmitRecord(bitc::FS_COMBINED_PROFILE, NameVals,
|
||||
FSCallsProfileAbbrev);
|
||||
NameVals.clear();
|
||||
|
||||
writeFunctionHeapProfileRecords(
|
||||
Stream, FS, CallsiteAbbrev, AllocAbbrev, /*ContextIdAbbvId*/ 0,
|
||||
/*PerModule*/ false,
|
||||
/*GetValueId*/
|
||||
[&](const ValueInfo &VI) -> unsigned {
|
||||
std::optional<unsigned> ValueID = GetValueId(VI);
|
||||
// This can happen in shared index files for distributed ThinLTO if
|
||||
// the callee function summary is not included. Record 0 which we
|
||||
// will have to deal with conservatively when doing any kind of
|
||||
// validation in the ThinLTO backends.
|
||||
if (!ValueID)
|
||||
return 0;
|
||||
return *ValueID;
|
||||
},
|
||||
/*GetStackIndex*/
|
||||
[&](unsigned I) {
|
||||
// Get the corresponding index into the list of StackIds actually
|
||||
// being written for this combined index (which may be a subset in
|
||||
// the case of distributed indexes).
|
||||
assert(StackIdIndicesToIndex.contains(I));
|
||||
return StackIdIndicesToIndex[I];
|
||||
},
|
||||
/*WriteContextSizeInfoIndex*/ false, CallStackPos, CallStackCount);
|
||||
|
||||
MaybeEmitOriginalName(*S);
|
||||
});
|
||||
|
||||
|
||||
@@ -1057,7 +1057,7 @@ public:
|
||||
for (auto &I : FunctionCalleesToSynthesizedCallsiteInfos) {
|
||||
auto *FS = I.first;
|
||||
for (auto &Callsite : I.second)
|
||||
FS->addCallsite(*Callsite.second);
|
||||
FS->addCallsite(std::move(*Callsite.second));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
; RUN: opt -module-summary %s -o - | llvm-bcanalyzer -dump | FileCheck %s
|
||||
|
||||
; CHECK: <GLOBALVAL_SUMMARY_BLOCK
|
||||
; CHECK: <VERSION op0=12/>
|
||||
; CHECK: <VERSION op0=13/>
|
||||
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user