[AtomicExpandPass] Preserve atomic and volatile nature of emulated operations (#188361)
The fix does the following in expandPartwordCmpXchg and insertRMWCmpXchgLoop. - Issues volatile operations in the emulation loops if the original operation is volatile. - A preheader load is used for initializing the "cmp" and "new" values of the cmpxchg in the loop. Makes this load atomic. This is done under a target hook (`shouldIssueAtomicLoadForAtomicEmulationLoop()`), to allow backends to migrate independently. - `processAtomicInstr` is called on this load, to massage it into something that can be lowered in SelectionDAG / GISel. - This caused 3 kinds of failures: 1. Caused by changes to codegen: updated these either using the scripts, or mechanically (using claude) to match the new codegen. 2. Crashes caused by newly created atomic loads not being processed by AtomicExpandPass. (The atomic load, if tested in an independent test, does not cause a crash.) To fix these, added recursive calls to processAtomicInstr on the newly created atomic loads. These calls convert the loads to libcalls, or cast them to integer types. 3. Crashes in X86, AMDGPU, and AArch64 caused by unhandled vector types. These loads crash even with upstream LLVM, due to the lack of support in these targets for vector atomic loads (the corresponding vector atomicrmw instructions are supported). Disabled issuing atomic loads for these backends. Will follow up with individual PRs to revert to the default behavior.
This commit is contained in:
@@ -2301,6 +2301,14 @@ public:
|
||||
return AtomicOrdering::Monotonic;
|
||||
}
|
||||
|
||||
// Whether to issue an atomic load for the initial word value before the
|
||||
// atomicrmw/cmpxchg emulation loop.
|
||||
// TODO: For correctness, an atomic load should be issued for all targets.
|
||||
// Remove this API once this is achieved
|
||||
virtual bool shouldIssueAtomicLoadForAtomicEmulationLoop(void) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Perform a load-linked operation on Addr, returning a "Value *" with the
|
||||
/// corresponding pointee type. This may entail some non-trivial operations to
|
||||
/// truncate or reconstruct types that will be illegal in the backend. See
|
||||
|
||||
@@ -73,7 +73,7 @@ private:
|
||||
/// MetadataSrc)
|
||||
using CreateCmpXchgInstFun = function_ref<void(
|
||||
IRBuilderBase &, Value *, Value *, Value *, Align, AtomicOrdering,
|
||||
SyncScope::ID, Value *&, Value *&, Instruction *)>;
|
||||
SyncScope::ID, bool, Value *&, Value *&, Instruction *)>;
|
||||
|
||||
void handleFailure(Instruction &FailedInst, const Twine &Msg,
|
||||
Instruction *DiagnosticInst = nullptr) const {
|
||||
@@ -122,9 +122,9 @@ private:
|
||||
void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);
|
||||
|
||||
AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
|
||||
static Value *insertRMWCmpXchgLoop(
|
||||
Value *insertRMWCmpXchgLoop(
|
||||
IRBuilderBase &Builder, Type *ResultType, Value *Addr, Align AddrAlign,
|
||||
AtomicOrdering MemOpOrder, SyncScope::ID SSID,
|
||||
AtomicOrdering MemOpOrder, SyncScope::ID SSID, bool IsVolatile,
|
||||
function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
|
||||
CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc);
|
||||
bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);
|
||||
@@ -735,8 +735,8 @@ void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
|
||||
static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
|
||||
Value *Loaded, Value *NewVal, Align AddrAlign,
|
||||
AtomicOrdering MemOpOrder, SyncScope::ID SSID,
|
||||
Value *&Success, Value *&NewLoaded,
|
||||
Instruction *MetadataSrc) {
|
||||
bool IsVolatile, Value *&Success,
|
||||
Value *&NewLoaded, Instruction *MetadataSrc) {
|
||||
Type *OrigTy = NewVal->getType();
|
||||
|
||||
// This code can go away when cmpxchg supports FP and vector types.
|
||||
@@ -751,6 +751,7 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
|
||||
AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
|
||||
Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
|
||||
AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
|
||||
Pair->setVolatile(IsVolatile);
|
||||
if (MetadataSrc)
|
||||
copyMetadataForAtomic(*Pair, *MetadataSrc);
|
||||
|
||||
@@ -1094,9 +1095,10 @@ void AtomicExpandImpl::expandPartwordAtomicRMW(
|
||||
|
||||
Value *OldResult;
|
||||
if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
|
||||
OldResult = insertRMWCmpXchgLoop(
|
||||
Builder, PMV.WordType, PMV.AlignedAddr, PMV.AlignedAddrAlignment,
|
||||
MemOpOrder, SSID, PerformPartwordOp, createCmpXchgInstFun, AI);
|
||||
OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
|
||||
PMV.AlignedAddrAlignment, MemOpOrder, SSID,
|
||||
AI->isVolatile(), PerformPartwordOp,
|
||||
createCmpXchgInstFun, AI);
|
||||
} else {
|
||||
assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
|
||||
OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
|
||||
@@ -1215,7 +1217,6 @@ bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
|
||||
// Load the entire current word, and mask into place the expected and new
|
||||
// values
|
||||
LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
|
||||
InitLoaded->setVolatile(CI->isVolatile());
|
||||
Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
|
||||
Builder.CreateBr(LoopBB);
|
||||
|
||||
@@ -1224,6 +1225,20 @@ bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
|
||||
PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
|
||||
Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);
|
||||
|
||||
// The initial load must be atomic with the same synchronization scope
|
||||
// to avoid a data race with concurrent stores. If the instruction being
|
||||
// emulated is volatile, issue a volatile load.
|
||||
// addIncoming is done first so that any replaceAllUsesWith calls during
|
||||
// normalization correctly update the PHI incoming value.
|
||||
InitLoaded->setVolatile(CI->isVolatile());
|
||||
if (TLI->shouldIssueAtomicLoadForAtomicEmulationLoop()) {
|
||||
InitLoaded->setAtomic(AtomicOrdering::Monotonic, CI->getSyncScopeID());
|
||||
// The newly created load might need to be lowered further. Because it is
|
||||
// created in the same block as the atomicrmw, the AtomicExpand loop will
|
||||
// not process it again.
|
||||
processAtomicInstr(InitLoaded);
|
||||
}
|
||||
|
||||
// Mask/Or the expected and new values into place in the loaded word.
|
||||
Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
|
||||
Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
|
||||
@@ -1718,7 +1733,7 @@ bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
|
||||
|
||||
Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
|
||||
IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
|
||||
AtomicOrdering MemOpOrder, SyncScope::ID SSID,
|
||||
AtomicOrdering MemOpOrder, SyncScope::ID SSID, bool IsVolatile,
|
||||
function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
|
||||
CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc) {
|
||||
LLVMContext &Ctx = Builder.getContext();
|
||||
@@ -1750,9 +1765,6 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
|
||||
std::prev(BB->end())->eraseFromParent();
|
||||
Builder.SetInsertPoint(BB);
|
||||
LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
|
||||
// TODO: The initial load must be atomic with the same synchronization scope
|
||||
// to avoid a data race with concurrent stores. If the instruction being
|
||||
// emulated is volatile, issue a volatile load.
|
||||
Builder.CreateBr(LoopBB);
|
||||
|
||||
// Start the main loop block now that we've taken care of the preliminaries.
|
||||
@@ -1760,6 +1772,20 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
|
||||
PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
|
||||
Loaded->addIncoming(InitLoaded, BB);
|
||||
|
||||
// The initial load must be atomic with the same synchronization scope
|
||||
// to avoid a data race with concurrent stores. If the instruction being
|
||||
// emulated is volatile, issue a volatile load.
|
||||
// addIncoming is done first so that any replaceAllUsesWith calls during
|
||||
// normalization correctly update the PHI incoming value.
|
||||
InitLoaded->setVolatile(IsVolatile);
|
||||
if (TLI->shouldIssueAtomicLoadForAtomicEmulationLoop()) {
|
||||
InitLoaded->setAtomic(AtomicOrdering::Monotonic, SSID);
|
||||
// The newly created load might need to be lowered further. Because it is
|
||||
// created in the same block as the atomicrmw, the AtomicExpand loop will
|
||||
// not process it again.
|
||||
processAtomicInstr(InitLoaded);
|
||||
}
|
||||
|
||||
Value *NewVal = PerformOp(Builder, Loaded);
|
||||
|
||||
Value *NewLoaded = nullptr;
|
||||
@@ -1769,7 +1795,7 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
|
||||
MemOpOrder == AtomicOrdering::Unordered
|
||||
? AtomicOrdering::Monotonic
|
||||
: MemOpOrder,
|
||||
SSID, Success, NewLoaded, MetadataSrc);
|
||||
SSID, IsVolatile, Success, NewLoaded, MetadataSrc);
|
||||
assert(Success && NewLoaded);
|
||||
|
||||
Loaded->addIncoming(NewLoaded, LoopBB);
|
||||
@@ -1821,7 +1847,7 @@ bool AtomicExpandImpl::expandAtomicRMWToCmpXchg(
|
||||
// loop for the FP atomics.
|
||||
Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
|
||||
Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
|
||||
AI->getOrdering(), AI->getSyncScopeID(),
|
||||
AI->getOrdering(), AI->getSyncScopeID(), AI->isVolatile(),
|
||||
[&](IRBuilderBase &Builder, Value *Loaded) {
|
||||
return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
|
||||
AI->getValOperand());
|
||||
@@ -1984,12 +2010,13 @@ void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
|
||||
expandAtomicRMWToCmpXchg(
|
||||
I, [this, I](IRBuilderBase &Builder, Value *Addr, Value *Loaded,
|
||||
Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
|
||||
SyncScope::ID SSID, Value *&Success, Value *&NewLoaded,
|
||||
Instruction *MetadataSrc) {
|
||||
SyncScope::ID SSID, bool IsVolatile, Value *&Success,
|
||||
Value *&NewLoaded, Instruction *MetadataSrc) {
|
||||
// Create the CAS instruction normally...
|
||||
AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
|
||||
Addr, Loaded, NewVal, Alignment, MemOpOrder,
|
||||
AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
|
||||
Pair->setVolatile(IsVolatile);
|
||||
if (MetadataSrc)
|
||||
copyMetadataForAtomic(*Pair, *MetadataSrc);
|
||||
|
||||
|
||||
@@ -352,6 +352,7 @@ public:
|
||||
|
||||
TargetLoweringBase::AtomicExpansionKind
|
||||
shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
|
||||
|
||||
TargetLoweringBase::AtomicExpansionKind
|
||||
shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
|
||||
TargetLoweringBase::AtomicExpansionKind
|
||||
@@ -360,6 +361,10 @@ public:
|
||||
TargetLoweringBase::AtomicExpansionKind
|
||||
shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const override;
|
||||
|
||||
bool shouldIssueAtomicLoadForAtomicEmulationLoop() const override {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool useLoadStackGuardNode(const Module &M) const override;
|
||||
TargetLoweringBase::LegalizeTypeAction
|
||||
getPreferredVectorAction(MVT VT) const override;
|
||||
|
||||
@@ -255,6 +255,10 @@ public:
|
||||
return AtomicExpansionKind::None;
|
||||
}
|
||||
|
||||
bool shouldIssueAtomicLoadForAtomicEmulationLoop() const override {
|
||||
return false;
|
||||
}
|
||||
|
||||
static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
|
||||
static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);
|
||||
|
||||
|
||||
@@ -82,6 +82,10 @@ public:
|
||||
shouldExpandAtomicRMWInIR(const AtomicRMWInst *RMW) const override;
|
||||
AtomicExpansionKind
|
||||
shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const override;
|
||||
|
||||
bool shouldIssueAtomicLoadForAtomicEmulationLoop() const override {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
} // namespace llvm
|
||||
|
||||
|
||||
@@ -882,6 +882,7 @@ namespace llvm {
|
||||
|
||||
TargetLoweringBase::AtomicExpansionKind
|
||||
shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
|
||||
|
||||
TargetLoweringBase::AtomicExpansionKind
|
||||
shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
|
||||
TargetLoweringBase::AtomicExpansionKind
|
||||
@@ -894,6 +895,10 @@ namespace llvm {
|
||||
LoadInst *
|
||||
lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
|
||||
|
||||
bool shouldIssueAtomicLoadForAtomicEmulationLoop() const override {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool needsCmpXchgNb(Type *MemType) const;
|
||||
|
||||
void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -8,84 +8,153 @@
|
||||
;; frame pointer and a base pointer to be needed.
|
||||
|
||||
define void @test(ptr %ptr) {
|
||||
; CHECK-LABEL: test:
|
||||
; CHECK: @ %bb.0: @ %entry
|
||||
; CHECK-NEXT: .save {r4, r5, r6, r8, r9, r10, r11, lr}
|
||||
; CHECK-NEXT: push {r4, r5, r6, r8, r9, r10, r11, lr}
|
||||
; CHECK-NEXT: .setfp r11, sp, #24
|
||||
; CHECK-NEXT: add r11, sp, #24
|
||||
; CHECK-NEXT: .pad #32
|
||||
; CHECK-NEXT: sub sp, sp, #32
|
||||
; CHECK-NEXT: bfc sp, #0, #4
|
||||
; CHECK-NEXT: mov r6, sp
|
||||
; CHECK-NEXT: str r0, [r6, #28] @ 4-byte Spill
|
||||
; CHECK-NEXT: b .LBB0_1
|
||||
; CHECK-NEXT: .LBB0_1: @ %block1
|
||||
; CHECK-NEXT: ldr r0, [r6, #28] @ 4-byte Reload
|
||||
; CHECK-NEXT: mov r1, sp
|
||||
; CHECK-NEXT: sub r1, r1, #16
|
||||
; CHECK-NEXT: bic r1, r1, #15
|
||||
; CHECK-NEXT: mov sp, r1
|
||||
; CHECK-NEXT: dmb ish
|
||||
; CHECK-NEXT: ldr r1, [r0]
|
||||
; CHECK-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-NEXT: str r1, [r6, #20] @ 4-byte Spill
|
||||
; CHECK-NEXT: str r0, [r6, #24] @ 4-byte Spill
|
||||
; CHECK-NEXT: b .LBB0_2
|
||||
; CHECK-NEXT: .LBB0_2: @ %atomicrmw.start
|
||||
; CHECK-NEXT: @ =>This Loop Header: Depth=1
|
||||
; CHECK-NEXT: @ Child Loop BB0_3 Depth 2
|
||||
; CHECK-NEXT: ldr r2, [r6, #24] @ 4-byte Reload
|
||||
; CHECK-NEXT: ldr r0, [r6, #20] @ 4-byte Reload
|
||||
; CHECK-NEXT: ldr r8, [r6, #28] @ 4-byte Reload
|
||||
; LE-NEXT: str r2, [r6, #16] @ 4-byte Spill
|
||||
; LE-NEXT: str r0, [r6, #12] @ 4-byte Spill
|
||||
; BE-NEXT: str r2, [r6, #12] @ 4-byte Spill
|
||||
; BE-NEXT: str r0, [r6, #16] @ 4-byte Spill
|
||||
; CHECK-NEXT: @ implicit-def: $r1
|
||||
; CHECK-NEXT: @ implicit-def: $r3
|
||||
; CHECK-NEXT: @ kill: def $r8 killed $r8 def $r8_r9
|
||||
; CHECK-NEXT: mov r9, r1
|
||||
; CHECK-NEXT: @ kill: def $r0 killed $r0 def $r0_r1
|
||||
; CHECK-NEXT: mov r1, r2
|
||||
; CHECK-NEXT: mov r12, #0
|
||||
; CHECK-NEXT: mov r2, r12
|
||||
; CHECK-NEXT: mov r3, r12
|
||||
; CHECK-NEXT: .LBB0_3: @ %atomicrmw.start
|
||||
; CHECK-NEXT: @ Parent Loop BB0_2 Depth=1
|
||||
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
|
||||
; CHECK-NEXT: ldrexd r4, r5, [r8]
|
||||
; CHECK-NEXT: cmp r4, r0
|
||||
; CHECK-NEXT: cmpeq r5, r1
|
||||
; CHECK-NEXT: bne .LBB0_5
|
||||
; CHECK-NEXT: @ %bb.4: @ %atomicrmw.start
|
||||
; CHECK-NEXT: @ in Loop: Header=BB0_3 Depth=2
|
||||
; CHECK-NEXT: strexd r9, r2, r3, [r8]
|
||||
; CHECK-NEXT: cmp r9, #0
|
||||
; CHECK-NEXT: bne .LBB0_3
|
||||
; CHECK-NEXT: .LBB0_5: @ %atomicrmw.start
|
||||
; CHECK-NEXT: @ in Loop: Header=BB0_2 Depth=1
|
||||
; CHECK-NEXT: ldr r2, [r6, #12] @ 4-byte Reload
|
||||
; LE-NEXT: ldr r1, [r6, #16] @ 4-byte Reload
|
||||
; LE-NEXT: mov r0, r5
|
||||
; LE-NEXT: eor r3, r0, r1
|
||||
; LE-NEXT: mov r1, r4
|
||||
; LE-NEXT: eor r2, r1, r2
|
||||
; BE-NEXT: ldr r0, [r6, #16] @ 4-byte Reload
|
||||
; BE-NEXT: mov r1, r4
|
||||
; BE-NEXT: eor r3, r1, r0
|
||||
; BE-NEXT: mov r0, r5
|
||||
; BE-NEXT: eor r2, r0, r2
|
||||
; CHECK-NEXT: orr r2, r2, r3
|
||||
; CHECK-NEXT: cmp r2, #0
|
||||
; CHECK-NEXT: str r1, [r6, #20] @ 4-byte Spill
|
||||
; CHECK-NEXT: str r0, [r6, #24] @ 4-byte Spill
|
||||
; CHECK-NEXT: bne .LBB0_2
|
||||
; CHECK-NEXT: b .LBB0_6
|
||||
; CHECK-NEXT: .LBB0_6: @ %atomicrmw.end
|
||||
; CHECK-NEXT: dmb ish
|
||||
; CHECK-NEXT: sub sp, r11, #24
|
||||
; CHECK-NEXT: pop {r4, r5, r6, r8, r9, r10, r11, pc}
|
||||
; LE-LABEL: test:
|
||||
; LE: @ %bb.0: @ %entry
|
||||
; LE-NEXT: .save {r4, r5, r6, r8, r9, r10, r11, lr}
|
||||
; LE-NEXT: push {r4, r5, r6, r8, r9, r10, r11, lr}
|
||||
; LE-NEXT: .setfp r11, sp, #24
|
||||
; LE-NEXT: add r11, sp, #24
|
||||
; LE-NEXT: .pad #32
|
||||
; LE-NEXT: sub sp, sp, #32
|
||||
; LE-NEXT: bfc sp, #0, #4
|
||||
; LE-NEXT: mov r6, sp
|
||||
; LE-NEXT: str r0, [r6, #28] @ 4-byte Spill
|
||||
; LE-NEXT: b .LBB0_1
|
||||
; LE-NEXT: .LBB0_1: @ %block1
|
||||
; LE-NEXT: ldr r0, [r6, #28] @ 4-byte Reload
|
||||
; LE-NEXT: mov r1, sp
|
||||
; LE-NEXT: sub r1, r1, #16
|
||||
; LE-NEXT: bic r1, r1, #15
|
||||
; LE-NEXT: mov sp, r1
|
||||
; LE-NEXT: dmb ish
|
||||
; LE-NEXT: ldrexd r2, r3, [r0]
|
||||
; LE-NEXT: mov r0, r3
|
||||
; LE-NEXT: mov r1, r2
|
||||
; LE-NEXT: clrex
|
||||
; LE-NEXT: str r1, [r6, #20] @ 4-byte Spill
|
||||
; LE-NEXT: str r0, [r6, #24] @ 4-byte Spill
|
||||
; LE-NEXT: b .LBB0_2
|
||||
; LE-NEXT: .LBB0_2: @ %atomicrmw.start
|
||||
; LE-NEXT: @ =>This Loop Header: Depth=1
|
||||
; LE-NEXT: @ Child Loop BB0_3 Depth 2
|
||||
; LE-NEXT: ldr r2, [r6, #24] @ 4-byte Reload
|
||||
; LE-NEXT: ldr r0, [r6, #20] @ 4-byte Reload
|
||||
; LE-NEXT: ldr r8, [r6, #28] @ 4-byte Reload
|
||||
; LE-NEXT: str r2, [r6, #16] @ 4-byte Spill
|
||||
; LE-NEXT: str r0, [r6, #12] @ 4-byte Spill
|
||||
; LE-NEXT: @ implicit-def: $r1
|
||||
; LE-NEXT: @ implicit-def: $r3
|
||||
; LE-NEXT: @ kill: def $r8 killed $r8 def $r8_r9
|
||||
; LE-NEXT: mov r9, r1
|
||||
; LE-NEXT: @ kill: def $r0 killed $r0 def $r0_r1
|
||||
; LE-NEXT: mov r1, r2
|
||||
; LE-NEXT: mov r12, #0
|
||||
; LE-NEXT: mov r2, r12
|
||||
; LE-NEXT: mov r3, r12
|
||||
; LE-NEXT: .LBB0_3: @ %atomicrmw.start
|
||||
; LE-NEXT: @ Parent Loop BB0_2 Depth=1
|
||||
; LE-NEXT: @ => This Inner Loop Header: Depth=2
|
||||
; LE-NEXT: ldrexd r4, r5, [r8]
|
||||
; LE-NEXT: cmp r4, r0
|
||||
; LE-NEXT: cmpeq r5, r1
|
||||
; LE-NEXT: bne .LBB0_5
|
||||
; LE-NEXT: @ %bb.4: @ %atomicrmw.start
|
||||
; LE-NEXT: @ in Loop: Header=BB0_3 Depth=2
|
||||
; LE-NEXT: strexd r9, r2, r3, [r8]
|
||||
; LE-NEXT: cmp r9, #0
|
||||
; LE-NEXT: bne .LBB0_3
|
||||
; LE-NEXT: .LBB0_5: @ %atomicrmw.start
|
||||
; LE-NEXT: @ in Loop: Header=BB0_2 Depth=1
|
||||
; LE-NEXT: ldr r2, [r6, #12] @ 4-byte Reload
|
||||
; LE-NEXT: ldr r1, [r6, #16] @ 4-byte Reload
|
||||
; LE-NEXT: mov r0, r5
|
||||
; LE-NEXT: eor r3, r0, r1
|
||||
; LE-NEXT: mov r1, r4
|
||||
; LE-NEXT: eor r2, r1, r2
|
||||
; LE-NEXT: orr r2, r2, r3
|
||||
; LE-NEXT: cmp r2, #0
|
||||
; LE-NEXT: str r1, [r6, #20] @ 4-byte Spill
|
||||
; LE-NEXT: str r0, [r6, #24] @ 4-byte Spill
|
||||
; LE-NEXT: bne .LBB0_2
|
||||
; LE-NEXT: b .LBB0_6
|
||||
; LE-NEXT: .LBB0_6: @ %atomicrmw.end
|
||||
; LE-NEXT: dmb ish
|
||||
; LE-NEXT: sub sp, r11, #24
|
||||
; LE-NEXT: pop {r4, r5, r6, r8, r9, r10, r11, pc}
|
||||
;
|
||||
; BE-LABEL: test:
|
||||
; BE: @ %bb.0: @ %entry
|
||||
; BE-NEXT: .save {r4, r5, r6, r8, r9, r10, r11, lr}
|
||||
; BE-NEXT: push {r4, r5, r6, r8, r9, r10, r11, lr}
|
||||
; BE-NEXT: .setfp r11, sp, #24
|
||||
; BE-NEXT: add r11, sp, #24
|
||||
; BE-NEXT: .pad #32
|
||||
; BE-NEXT: sub sp, sp, #32
|
||||
; BE-NEXT: bfc sp, #0, #4
|
||||
; BE-NEXT: mov r6, sp
|
||||
; BE-NEXT: str r0, [r6, #28] @ 4-byte Spill
|
||||
; BE-NEXT: b .LBB0_1
|
||||
; BE-NEXT: .LBB0_1: @ %block1
|
||||
; BE-NEXT: ldr r0, [r6, #28] @ 4-byte Reload
|
||||
; BE-NEXT: mov r1, sp
|
||||
; BE-NEXT: sub r1, r1, #16
|
||||
; BE-NEXT: bic r1, r1, #15
|
||||
; BE-NEXT: mov sp, r1
|
||||
; BE-NEXT: dmb ish
|
||||
; BE-NEXT: ldrexd r2, r3, [r0]
|
||||
; BE-NEXT: mov r0, r3
|
||||
; BE-NEXT: mov r1, r2
|
||||
; BE-NEXT: clrex
|
||||
; BE-NEXT: str r1, [r6, #20] @ 4-byte Spill
|
||||
; BE-NEXT: str r0, [r6, #24] @ 4-byte Spill
|
||||
; BE-NEXT: b .LBB0_2
|
||||
; BE-NEXT: .LBB0_2: @ %atomicrmw.start
|
||||
; BE-NEXT: @ =>This Loop Header: Depth=1
|
||||
; BE-NEXT: @ Child Loop BB0_3 Depth 2
|
||||
; BE-NEXT: ldr r2, [r6, #24] @ 4-byte Reload
|
||||
; BE-NEXT: ldr r0, [r6, #20] @ 4-byte Reload
|
||||
; BE-NEXT: ldr r8, [r6, #28] @ 4-byte Reload
|
||||
; BE-NEXT: str r2, [r6, #12] @ 4-byte Spill
|
||||
; BE-NEXT: str r0, [r6, #16] @ 4-byte Spill
|
||||
; BE-NEXT: @ implicit-def: $r1
|
||||
; BE-NEXT: @ implicit-def: $r3
|
||||
; BE-NEXT: @ kill: def $r8 killed $r8 def $r8_r9
|
||||
; BE-NEXT: mov r9, r1
|
||||
; BE-NEXT: @ kill: def $r0 killed $r0 def $r0_r1
|
||||
; BE-NEXT: mov r1, r2
|
||||
; BE-NEXT: mov r12, #0
|
||||
; BE-NEXT: mov r2, r12
|
||||
; BE-NEXT: mov r3, r12
|
||||
; BE-NEXT: .LBB0_3: @ %atomicrmw.start
|
||||
; BE-NEXT: @ Parent Loop BB0_2 Depth=1
|
||||
; BE-NEXT: @ => This Inner Loop Header: Depth=2
|
||||
; BE-NEXT: ldrexd r4, r5, [r8]
|
||||
; BE-NEXT: cmp r4, r0
|
||||
; BE-NEXT: cmpeq r5, r1
|
||||
; BE-NEXT: bne .LBB0_5
|
||||
; BE-NEXT: @ %bb.4: @ %atomicrmw.start
|
||||
; BE-NEXT: @ in Loop: Header=BB0_3 Depth=2
|
||||
; BE-NEXT: strexd r9, r2, r3, [r8]
|
||||
; BE-NEXT: cmp r9, #0
|
||||
; BE-NEXT: bne .LBB0_3
|
||||
; BE-NEXT: .LBB0_5: @ %atomicrmw.start
|
||||
; BE-NEXT: @ in Loop: Header=BB0_2 Depth=1
|
||||
; BE-NEXT: ldr r2, [r6, #12] @ 4-byte Reload
|
||||
; BE-NEXT: ldr r0, [r6, #16] @ 4-byte Reload
|
||||
; BE-NEXT: mov r1, r4
|
||||
; BE-NEXT: eor r3, r1, r0
|
||||
; BE-NEXT: mov r0, r5
|
||||
; BE-NEXT: eor r2, r0, r2
|
||||
; BE-NEXT: orr r2, r2, r3
|
||||
; BE-NEXT: cmp r2, #0
|
||||
; BE-NEXT: str r1, [r6, #20] @ 4-byte Spill
|
||||
; BE-NEXT: str r0, [r6, #24] @ 4-byte Spill
|
||||
; BE-NEXT: bne .LBB0_2
|
||||
; BE-NEXT: b .LBB0_6
|
||||
; BE-NEXT: .LBB0_6: @ %atomicrmw.end
|
||||
; BE-NEXT: dmb ish
|
||||
; BE-NEXT: sub sp, r11, #24
|
||||
; BE-NEXT: pop {r4, r5, r6, r8, r9, r10, r11, pc}
|
||||
entry:
|
||||
br label %block1
|
||||
|
||||
@@ -94,3 +163,5 @@ block1:
|
||||
store atomic i64 0, ptr %ptr seq_cst, align 8
|
||||
ret void
|
||||
}
|
||||
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
|
||||
; CHECK: {{.*}}
|
||||
|
||||
@@ -324,17 +324,19 @@ define void @test_old_store_64bit(ptr %p, i64 %v) {
|
||||
;
|
||||
; ARMOPTNONE-LABEL: test_old_store_64bit:
|
||||
; ARMOPTNONE: @ %bb.0:
|
||||
; ARMOPTNONE-NEXT: push {r4, r5, r7, r8, r10, r11, lr}
|
||||
; ARMOPTNONE-NEXT: add r7, sp, #20
|
||||
; ARMOPTNONE-NEXT: sub sp, sp, #24
|
||||
; ARMOPTNONE-NEXT: str r0, [sp, #4] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r2, [sp, #8] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r1, [sp, #12] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: dmb ish
|
||||
; ARMOPTNONE-NEXT: ldr r1, [r0]
|
||||
; ARMOPTNONE-NEXT: ldr r0, [r0, #4]
|
||||
; ARMOPTNONE-NEXT: str r1, [sp, #16] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r0, [sp, #20] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: push {r4, r5, r7, r8, r10, r11, lr}
|
||||
; ARMOPTNONE-NEXT: add r7, sp, #20
|
||||
; ARMOPTNONE-NEXT: sub sp, sp, #24
|
||||
; ARMOPTNONE-NEXT: str r0, [sp, #4] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r2, [sp, #8] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r1, [sp, #12] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: dmb ish
|
||||
; ARMOPTNONE-NEXT: ldrexd r2, r3, [r0]
|
||||
; ARMOPTNONE-NEXT: mov r0, r3
|
||||
; ARMOPTNONE-NEXT: mov r1, r2
|
||||
; ARMOPTNONE-NEXT: clrex
|
||||
; ARMOPTNONE-NEXT: str r1, [sp, #16] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r0, [sp, #20] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: b LBB5_1
|
||||
; ARMOPTNONE-NEXT: LBB5_1: @ %atomicrmw.start
|
||||
; ARMOPTNONE-NEXT: @ =>This Loop Header: Depth=1
|
||||
@@ -381,7 +383,7 @@ define void @test_old_store_64bit(ptr %p, i64 %v) {
|
||||
; ARMOPTNONE-NEXT: LBB5_5: @ %atomicrmw.end
|
||||
; ARMOPTNONE-NEXT: dmb ish
|
||||
; ARMOPTNONE-NEXT: sub sp, r7, #20
|
||||
; ARMOPTNONE-NEXT: pop {r4, r5, r7, r8, r10, r11, pc}
|
||||
; ARMOPTNONE-NEXT: pop {r4, r5, r7, r8, r10, r11, pc}
|
||||
;
|
||||
; THUMBTWO-LABEL: test_old_store_64bit:
|
||||
; THUMBTWO: @ %bb.0:
|
||||
@@ -862,19 +864,21 @@ define void @store_atomic_f64__seq_cst(ptr %ptr, double %val1) {
|
||||
;
|
||||
; ARMOPTNONE-LABEL: store_atomic_f64__seq_cst:
|
||||
; ARMOPTNONE: @ %bb.0:
|
||||
; ARMOPTNONE-NEXT: push {r4, r5, r7, r8, r10, r11, lr}
|
||||
; ARMOPTNONE-NEXT: add r7, sp, #20
|
||||
; ARMOPTNONE-NEXT: sub sp, sp, #24
|
||||
; ARMOPTNONE-NEXT: str r0, [sp, #4] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: vmov d16, r1, r2
|
||||
; ARMOPTNONE-NEXT: vmov r1, r2, d16
|
||||
; ARMOPTNONE-NEXT: str r2, [sp, #8] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r1, [sp, #12] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: dmb ish
|
||||
; ARMOPTNONE-NEXT: ldr r1, [r0]
|
||||
; ARMOPTNONE-NEXT: ldr r0, [r0, #4]
|
||||
; ARMOPTNONE-NEXT: str r1, [sp, #16] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r0, [sp, #20] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: push {r4, r5, r7, r8, r10, r11, lr}
|
||||
; ARMOPTNONE-NEXT: add r7, sp, #20
|
||||
; ARMOPTNONE-NEXT: sub sp, sp, #24
|
||||
; ARMOPTNONE-NEXT: str r0, [sp, #4] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: vmov d16, r1, r2
|
||||
; ARMOPTNONE-NEXT: vmov r1, r2, d16
|
||||
; ARMOPTNONE-NEXT: str r2, [sp, #8] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r1, [sp, #12] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: dmb ish
|
||||
; ARMOPTNONE-NEXT: ldrexd r2, r3, [r0]
|
||||
; ARMOPTNONE-NEXT: mov r0, r3
|
||||
; ARMOPTNONE-NEXT: mov r1, r2
|
||||
; ARMOPTNONE-NEXT: clrex
|
||||
; ARMOPTNONE-NEXT: str r1, [sp, #16] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: str r0, [sp, #20] @ 4-byte Spill
|
||||
; ARMOPTNONE-NEXT: b LBB13_1
|
||||
; ARMOPTNONE-NEXT: LBB13_1: @ %atomicrmw.start
|
||||
; ARMOPTNONE-NEXT: @ =>This Loop Header: Depth=1
|
||||
@@ -921,7 +925,7 @@ define void @store_atomic_f64__seq_cst(ptr %ptr, double %val1) {
|
||||
; ARMOPTNONE-NEXT: LBB13_5: @ %atomicrmw.end
|
||||
; ARMOPTNONE-NEXT: dmb ish
|
||||
; ARMOPTNONE-NEXT: sub sp, r7, #20
|
||||
; ARMOPTNONE-NEXT: pop {r4, r5, r7, r8, r10, r11, pc}
|
||||
; ARMOPTNONE-NEXT: pop {r4, r5, r7, r8, r10, r11, pc}
|
||||
;
|
||||
; THUMBTWO-LABEL: store_atomic_f64__seq_cst:
|
||||
; THUMBTWO: @ %bb.0:
|
||||
|
||||
@@ -6771,8 +6771,10 @@ define i64 @test_xchg_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB33_1
|
||||
@@ -6832,8 +6834,9 @@ define i64 @test_xchg_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI33_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB33_1
|
||||
@@ -6897,8 +6900,8 @@ define i64 @test_xchg_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB33_1
|
||||
@@ -6993,8 +6996,10 @@ define i64 @test_add_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB34_1
|
||||
@@ -7054,8 +7059,9 @@ define i64 @test_add_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI34_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB34_1
|
||||
@@ -7119,8 +7125,8 @@ define i64 @test_add_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB34_1
|
||||
@@ -7215,8 +7221,10 @@ define i64 @test_sub_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB35_1
|
||||
@@ -7276,8 +7284,9 @@ define i64 @test_sub_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI35_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB35_1
|
||||
@@ -7341,8 +7350,8 @@ define i64 @test_sub_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB35_1
|
||||
@@ -7437,8 +7446,10 @@ define i64 @test_and_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB36_1
|
||||
@@ -7498,8 +7509,9 @@ define i64 @test_and_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI36_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB36_1
|
||||
@@ -7563,8 +7575,8 @@ define i64 @test_and_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB36_1
|
||||
@@ -7659,8 +7671,10 @@ define i64 @test_nand_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB37_1
|
||||
@@ -7722,8 +7736,9 @@ define i64 @test_nand_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI37_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB37_1
|
||||
@@ -7789,8 +7804,8 @@ define i64 @test_nand_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB37_1
|
||||
@@ -7886,8 +7901,10 @@ define i64 @test_or_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB38_1
|
||||
@@ -7946,8 +7963,9 @@ define i64 @test_or_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI38_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB38_1
|
||||
@@ -8010,8 +8028,8 @@ define i64 @test_or_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB38_1
|
||||
@@ -8105,8 +8123,10 @@ define i64 @test_xor_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB39_1
|
||||
@@ -8165,8 +8185,9 @@ define i64 @test_xor_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI39_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB39_1
|
||||
@@ -8229,8 +8250,8 @@ define i64 @test_xor_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB39_1
|
||||
@@ -8325,8 +8346,10 @@ define i64 @test_max_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB40_1
|
||||
@@ -8392,8 +8415,9 @@ define i64 @test_max_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI40_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB40_1
|
||||
@@ -8463,8 +8487,8 @@ define i64 @test_max_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB40_1
|
||||
@@ -8547,12 +8571,12 @@ define i64 @test_max_i64() {
|
||||
; CHECK-THUMB8BASE-NEXT: push {r4, lr}
|
||||
; CHECK-THUMB8BASE-NEXT: .pad #72
|
||||
; CHECK-THUMB8BASE-NEXT: sub sp, #72
|
||||
; CHECK-THUMB8BASE-NEXT: movw r1, :lower16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movt r1, :upper16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: ldr r0, [r1, #4]
|
||||
; CHECK-THUMB8BASE-NEXT: ldr r1, [r1]
|
||||
; CHECK-THUMB8BASE-NEXT: str r1, [sp, #56] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: str r0, [sp, #60] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movs r1, #0
|
||||
; CHECK-THUMB8BASE-NEXT: bl __atomic_load_8
|
||||
; CHECK-THUMB8BASE-NEXT: str r0, [sp, #56] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: str r1, [sp, #60] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: b .LBB40_1
|
||||
; CHECK-THUMB8BASE-NEXT: .LBB40_1: @ %atomicrmw.start
|
||||
; CHECK-THUMB8BASE-NEXT: @ =>This Inner Loop Header: Depth=1
|
||||
@@ -8638,8 +8662,10 @@ define i64 @test_min_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB41_1
|
||||
@@ -8705,8 +8731,9 @@ define i64 @test_min_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI41_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB41_1
|
||||
@@ -8776,8 +8803,8 @@ define i64 @test_min_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB41_1
|
||||
@@ -8860,12 +8887,12 @@ define i64 @test_min_i64() {
|
||||
; CHECK-THUMB8BASE-NEXT: push {r4, lr}
|
||||
; CHECK-THUMB8BASE-NEXT: .pad #72
|
||||
; CHECK-THUMB8BASE-NEXT: sub sp, #72
|
||||
; CHECK-THUMB8BASE-NEXT: movw r1, :lower16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movt r1, :upper16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: ldr r0, [r1, #4]
|
||||
; CHECK-THUMB8BASE-NEXT: ldr r1, [r1]
|
||||
; CHECK-THUMB8BASE-NEXT: str r1, [sp, #56] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: str r0, [sp, #60] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movs r1, #0
|
||||
; CHECK-THUMB8BASE-NEXT: bl __atomic_load_8
|
||||
; CHECK-THUMB8BASE-NEXT: str r0, [sp, #56] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: str r1, [sp, #60] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: b .LBB41_1
|
||||
; CHECK-THUMB8BASE-NEXT: .LBB41_1: @ %atomicrmw.start
|
||||
; CHECK-THUMB8BASE-NEXT: @ =>This Inner Loop Header: Depth=1
|
||||
@@ -8951,8 +8978,10 @@ define i64 @test_umax_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB42_1
|
||||
@@ -9018,8 +9047,9 @@ define i64 @test_umax_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI42_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB42_1
|
||||
@@ -9089,8 +9119,8 @@ define i64 @test_umax_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB42_1
|
||||
@@ -9173,12 +9203,12 @@ define i64 @test_umax_i64() {
|
||||
; CHECK-THUMB8BASE-NEXT: push {r4, lr}
|
||||
; CHECK-THUMB8BASE-NEXT: .pad #72
|
||||
; CHECK-THUMB8BASE-NEXT: sub sp, #72
|
||||
; CHECK-THUMB8BASE-NEXT: movw r1, :lower16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movt r1, :upper16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: ldr r0, [r1, #4]
|
||||
; CHECK-THUMB8BASE-NEXT: ldr r1, [r1]
|
||||
; CHECK-THUMB8BASE-NEXT: str r1, [sp, #56] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: str r0, [sp, #60] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movs r1, #0
|
||||
; CHECK-THUMB8BASE-NEXT: bl __atomic_load_8
|
||||
; CHECK-THUMB8BASE-NEXT: str r0, [sp, #56] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: str r1, [sp, #60] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: b .LBB42_1
|
||||
; CHECK-THUMB8BASE-NEXT: .LBB42_1: @ %atomicrmw.start
|
||||
; CHECK-THUMB8BASE-NEXT: @ =>This Inner Loop Header: Depth=1
|
||||
@@ -9264,8 +9294,10 @@ define i64 @test_umin_i64() {
|
||||
; CHECK-ARM8-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM8-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-ARM8-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM8-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM8-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM8-NEXT: mov r0, r3
|
||||
; CHECK-ARM8-NEXT: mov r1, r2
|
||||
; CHECK-ARM8-NEXT: clrex
|
||||
; CHECK-ARM8-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM8-NEXT: b .LBB43_1
|
||||
@@ -9331,8 +9363,9 @@ define i64 @test_umin_i64() {
|
||||
; CHECK-ARM6-NEXT: .pad #16
|
||||
; CHECK-ARM6-NEXT: sub sp, sp, #16
|
||||
; CHECK-ARM6-NEXT: ldr r0, .LCPI43_0
|
||||
; CHECK-ARM6-NEXT: ldr r1, [r0]
|
||||
; CHECK-ARM6-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-ARM6-NEXT: ldrexd r2, r3, [r0]
|
||||
; CHECK-ARM6-NEXT: mov r0, r3
|
||||
; CHECK-ARM6-NEXT: mov r1, r2
|
||||
; CHECK-ARM6-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-ARM6-NEXT: b .LBB43_1
|
||||
@@ -9402,8 +9435,8 @@ define i64 @test_umin_i64() {
|
||||
; CHECK-THUMB7-NEXT: sub sp, #16
|
||||
; CHECK-THUMB7-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB7-NEXT: ldr r1, [r0]
|
||||
; CHECK-THUMB7-NEXT: ldr r0, [r0, #4]
|
||||
; CHECK-THUMB7-NEXT: ldrexd r1, r0, [r0]
|
||||
; CHECK-THUMB7-NEXT: clrex
|
||||
; CHECK-THUMB7-NEXT: str r1, [sp, #8] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: str r0, [sp, #12] @ 4-byte Spill
|
||||
; CHECK-THUMB7-NEXT: b .LBB43_1
|
||||
@@ -9486,12 +9519,12 @@ define i64 @test_umin_i64() {
|
||||
; CHECK-THUMB8BASE-NEXT: push {r4, lr}
|
||||
; CHECK-THUMB8BASE-NEXT: .pad #72
|
||||
; CHECK-THUMB8BASE-NEXT: sub sp, #72
|
||||
; CHECK-THUMB8BASE-NEXT: movw r1, :lower16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movt r1, :upper16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: ldr r0, [r1, #4]
|
||||
; CHECK-THUMB8BASE-NEXT: ldr r1, [r1]
|
||||
; CHECK-THUMB8BASE-NEXT: str r1, [sp, #56] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: str r0, [sp, #60] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: movw r0, :lower16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movt r0, :upper16:atomic_i64
|
||||
; CHECK-THUMB8BASE-NEXT: movs r1, #0
|
||||
; CHECK-THUMB8BASE-NEXT: bl __atomic_load_8
|
||||
; CHECK-THUMB8BASE-NEXT: str r0, [sp, #56] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: str r1, [sp, #60] @ 4-byte Spill
|
||||
; CHECK-THUMB8BASE-NEXT: b .LBB43_1
|
||||
; CHECK-THUMB8BASE-NEXT: .LBB43_1: @ %atomicrmw.start
|
||||
; CHECK-THUMB8BASE-NEXT: @ =>This Inner Loop Header: Depth=1
|
||||
|
||||
@@ -5,7 +5,8 @@
|
||||
define float @float_fadd_acquire(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fadd_acquire:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -38,7 +39,8 @@ define float @float_fadd_acquire(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fadd_acquire:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
@@ -74,7 +76,8 @@ define float @float_fadd_acquire(ptr %p) nounwind {
|
||||
define float @float_fsub_acquire(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fsub_acquire:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, -264192
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -107,7 +110,8 @@ define float @float_fsub_acquire(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fsub_acquire:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1040
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB1_1: # %atomicrmw.start
|
||||
@@ -143,7 +147,8 @@ define float @float_fsub_acquire(ptr %p) nounwind {
|
||||
define float @float_fmin_acquire(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmin_acquire:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -176,7 +181,8 @@ define float @float_fmin_acquire(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmin_acquire:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
@@ -212,7 +218,8 @@ define float @float_fmin_acquire(ptr %p) nounwind {
|
||||
define float @float_fmax_acquire(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmax_acquire:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -245,7 +252,8 @@ define float @float_fmax_acquire(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmax_acquire:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
@@ -287,7 +295,13 @@ define double @double_fadd_acquire(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
@@ -323,7 +337,13 @@ define double @double_fadd_acquire(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -359,7 +379,13 @@ define double @double_fsub_acquire(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB5_1: # %atomicrmw.start
|
||||
@@ -395,7 +421,13 @@ define double @double_fsub_acquire(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB5_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -431,7 +463,13 @@ define double @double_fmin_acquire(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
@@ -467,7 +505,13 @@ define double @double_fmin_acquire(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -503,7 +547,13 @@ define double @double_fmax_acquire(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
@@ -539,7 +589,13 @@ define double @double_fmax_acquire(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -569,7 +625,8 @@ define double @double_fmax_acquire(ptr %p) nounwind {
|
||||
define float @float_fadd_release(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fadd_release:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -602,7 +659,8 @@ define float @float_fadd_release(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fadd_release:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB8_1: # %atomicrmw.start
|
||||
@@ -638,7 +696,8 @@ define float @float_fadd_release(ptr %p) nounwind {
|
||||
define float @float_fsub_release(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fsub_release:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, -264192
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -671,7 +730,8 @@ define float @float_fsub_release(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fsub_release:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1040
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB9_1: # %atomicrmw.start
|
||||
@@ -707,7 +767,8 @@ define float @float_fsub_release(ptr %p) nounwind {
|
||||
define float @float_fmin_release(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmin_release:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -740,7 +801,8 @@ define float @float_fmin_release(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmin_release:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB10_1: # %atomicrmw.start
|
||||
@@ -776,7 +838,8 @@ define float @float_fmin_release(ptr %p) nounwind {
|
||||
define float @float_fmax_release(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmax_release:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -809,7 +872,8 @@ define float @float_fmax_release(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmax_release:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB11_1: # %atomicrmw.start
|
||||
@@ -851,7 +915,13 @@ define double @double_fadd_release(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB12_1: # %atomicrmw.start
|
||||
@@ -887,7 +957,13 @@ define double @double_fadd_release(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB12_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -923,7 +999,13 @@ define double @double_fsub_release(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB13_1: # %atomicrmw.start
|
||||
@@ -959,7 +1041,13 @@ define double @double_fsub_release(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB13_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -995,7 +1083,13 @@ define double @double_fmin_release(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB14_1: # %atomicrmw.start
|
||||
@@ -1031,7 +1125,13 @@ define double @double_fmin_release(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB14_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -1067,7 +1167,13 @@ define double @double_fmax_release(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB15_1: # %atomicrmw.start
|
||||
@@ -1103,7 +1209,13 @@ define double @double_fmax_release(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB15_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -1133,7 +1245,8 @@ define double @double_fmax_release(ptr %p) nounwind {
|
||||
define float @float_fadd_acq_rel(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fadd_acq_rel:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -1166,7 +1279,8 @@ define float @float_fadd_acq_rel(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fadd_acq_rel:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB16_1: # %atomicrmw.start
|
||||
@@ -1202,7 +1316,8 @@ define float @float_fadd_acq_rel(ptr %p) nounwind {
|
||||
define float @float_fsub_acq_rel(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fsub_acq_rel:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, -264192
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -1235,7 +1350,8 @@ define float @float_fsub_acq_rel(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fsub_acq_rel:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1040
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB17_1: # %atomicrmw.start
|
||||
@@ -1271,7 +1387,8 @@ define float @float_fsub_acq_rel(ptr %p) nounwind {
|
||||
define float @float_fmin_acq_rel(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmin_acq_rel:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -1304,7 +1421,8 @@ define float @float_fmin_acq_rel(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmin_acq_rel:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB18_1: # %atomicrmw.start
|
||||
@@ -1340,7 +1458,8 @@ define float @float_fmin_acq_rel(ptr %p) nounwind {
|
||||
define float @float_fmax_acq_rel(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmax_acq_rel:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -1373,7 +1492,8 @@ define float @float_fmax_acq_rel(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmax_acq_rel:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB19_1: # %atomicrmw.start
|
||||
@@ -1415,7 +1535,13 @@ define double @double_fadd_acq_rel(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB20_1: # %atomicrmw.start
|
||||
@@ -1451,7 +1577,13 @@ define double @double_fadd_acq_rel(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB20_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -1487,7 +1619,13 @@ define double @double_fsub_acq_rel(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB21_1: # %atomicrmw.start
|
||||
@@ -1523,7 +1661,13 @@ define double @double_fsub_acq_rel(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB21_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -1559,7 +1703,13 @@ define double @double_fmin_acq_rel(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB22_1: # %atomicrmw.start
|
||||
@@ -1595,7 +1745,13 @@ define double @double_fmin_acq_rel(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB22_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -1631,7 +1787,13 @@ define double @double_fmax_acq_rel(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB23_1: # %atomicrmw.start
|
||||
@@ -1667,7 +1829,13 @@ define double @double_fmax_acq_rel(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB23_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -1697,7 +1865,8 @@ define double @double_fmax_acq_rel(ptr %p) nounwind {
|
||||
define float @float_fadd_seq_cst(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fadd_seq_cst:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -1730,7 +1899,8 @@ define float @float_fadd_seq_cst(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fadd_seq_cst:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB24_1: # %atomicrmw.start
|
||||
@@ -1766,7 +1936,8 @@ define float @float_fadd_seq_cst(ptr %p) nounwind {
|
||||
define float @float_fsub_seq_cst(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fsub_seq_cst:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, -264192
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -1799,7 +1970,8 @@ define float @float_fsub_seq_cst(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fsub_seq_cst:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1040
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB25_1: # %atomicrmw.start
|
||||
@@ -1835,7 +2007,8 @@ define float @float_fsub_seq_cst(ptr %p) nounwind {
|
||||
define float @float_fmin_seq_cst(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmin_seq_cst:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -1868,7 +2041,8 @@ define float @float_fmin_seq_cst(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmin_seq_cst:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB26_1: # %atomicrmw.start
|
||||
@@ -1904,7 +2078,8 @@ define float @float_fmin_seq_cst(ptr %p) nounwind {
|
||||
define float @float_fmax_seq_cst(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmax_seq_cst:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -1937,7 +2112,8 @@ define float @float_fmax_seq_cst(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmax_seq_cst:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB27_1: # %atomicrmw.start
|
||||
@@ -1979,7 +2155,13 @@ define double @double_fadd_seq_cst(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB28_1: # %atomicrmw.start
|
||||
@@ -2015,7 +2197,13 @@ define double @double_fadd_seq_cst(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB28_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -2051,7 +2239,13 @@ define double @double_fsub_seq_cst(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB29_1: # %atomicrmw.start
|
||||
@@ -2087,7 +2281,13 @@ define double @double_fsub_seq_cst(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB29_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -2123,7 +2323,13 @@ define double @double_fmin_seq_cst(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB30_1: # %atomicrmw.start
|
||||
@@ -2159,7 +2365,13 @@ define double @double_fmin_seq_cst(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB30_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -2195,7 +2407,13 @@ define double @double_fmax_seq_cst(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB31_1: # %atomicrmw.start
|
||||
@@ -2231,7 +2449,13 @@ define double @double_fmax_seq_cst(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB31_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -2261,7 +2485,8 @@ define double @double_fmax_seq_cst(ptr %p) nounwind {
|
||||
define float @float_fadd_monotonic(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fadd_monotonic:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -2294,7 +2519,8 @@ define float @float_fadd_monotonic(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fadd_monotonic:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB32_1: # %atomicrmw.start
|
||||
@@ -2330,7 +2556,8 @@ define float @float_fadd_monotonic(ptr %p) nounwind {
|
||||
define float @float_fsub_monotonic(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fsub_monotonic:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, -264192
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -2363,7 +2590,8 @@ define float @float_fsub_monotonic(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fsub_monotonic:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1040
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB33_1: # %atomicrmw.start
|
||||
@@ -2399,7 +2627,8 @@ define float @float_fsub_monotonic(ptr %p) nounwind {
|
||||
define float @float_fmin_monotonic(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmin_monotonic:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -2432,7 +2661,8 @@ define float @float_fmin_monotonic(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmin_monotonic:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB34_1: # %atomicrmw.start
|
||||
@@ -2468,7 +2698,8 @@ define float @float_fmin_monotonic(ptr %p) nounwind {
|
||||
define float @float_fmax_monotonic(ptr %p) nounwind {
|
||||
; LA64F-LABEL: float_fmax_monotonic:
|
||||
; LA64F: # %bb.0:
|
||||
; LA64F-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64F-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64F-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64F-NEXT: lu12i.w $a1, 260096
|
||||
; LA64F-NEXT: movgr2fr.w $fa1, $a1
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
@@ -2501,7 +2732,8 @@ define float @float_fmax_monotonic(ptr %p) nounwind {
|
||||
;
|
||||
; LA64D-LABEL: float_fmax_monotonic:
|
||||
; LA64D: # %bb.0:
|
||||
; LA64D-NEXT: fld.s $fa0, $a0, 0
|
||||
; LA64D-NEXT: ld.w $a1, $a0, 0
|
||||
; LA64D-NEXT: movgr2fr.w $fa0, $a1
|
||||
; LA64D-NEXT: vldi $vr1, -1168
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB35_1: # %atomicrmw.start
|
||||
@@ -2543,7 +2775,13 @@ define double @double_fadd_monotonic(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB36_1: # %atomicrmw.start
|
||||
@@ -2579,7 +2817,13 @@ define double @double_fadd_monotonic(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB36_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -2615,7 +2859,13 @@ define double @double_fsub_monotonic(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB37_1: # %atomicrmw.start
|
||||
@@ -2651,7 +2901,13 @@ define double @double_fsub_monotonic(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB37_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -2687,7 +2943,13 @@ define double @double_fmin_monotonic(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB38_1: # %atomicrmw.start
|
||||
@@ -2723,7 +2985,13 @@ define double @double_fmin_monotonic(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB38_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -2759,7 +3027,13 @@ define double @double_fmax_monotonic(ptr %p) nounwind {
|
||||
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64F-NEXT: move $fp, $a0
|
||||
; LA64F-NEXT: ld.d $s1, $a0, 0
|
||||
; LA64F-NEXT: ori $a0, $zero, 8
|
||||
; LA64F-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64F-NEXT: move $a1, $fp
|
||||
; LA64F-NEXT: move $a3, $zero
|
||||
; LA64F-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64F-NEXT: jirl $ra, $ra, 0
|
||||
; LA64F-NEXT: ld.d $s1, $sp, 8
|
||||
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
|
||||
; LA64F-NEXT: .p2align 4, , 16
|
||||
; LA64F-NEXT: .LBB39_1: # %atomicrmw.start
|
||||
@@ -2795,7 +3069,13 @@ define double @double_fmax_monotonic(ptr %p) nounwind {
|
||||
; LA64D-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
|
||||
; LA64D-NEXT: move $fp, $a0
|
||||
; LA64D-NEXT: fld.d $fa0, $a0, 0
|
||||
; LA64D-NEXT: ori $a0, $zero, 8
|
||||
; LA64D-NEXT: addi.d $a2, $sp, 8
|
||||
; LA64D-NEXT: move $a1, $fp
|
||||
; LA64D-NEXT: move $a3, $zero
|
||||
; LA64D-NEXT: pcaddu18i $ra, %call36(__atomic_load)
|
||||
; LA64D-NEXT: jirl $ra, $ra, 0
|
||||
; LA64D-NEXT: fld.d $fa0, $sp, 8
|
||||
; LA64D-NEXT: .p2align 4, , 16
|
||||
; LA64D-NEXT: .LBB39_1: # %atomicrmw.start
|
||||
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,27 +1,35 @@
|
||||
; RUN: not llc -mtriple=nvptx64 -mcpu=sm_30 -filetype=null %s 2>&1 | FileCheck %s
|
||||
; RUN: not llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx83 -filetype=null 2>&1 | FileCheck %s
|
||||
|
||||
; CHECK: error: unsupported atomicrmw xchg: instruction alignment 16 is smaller than the required 32-byte alignment for this atomic operation; target supports atomics up to 8 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw xor: instruction alignment 16 is smaller than the required 32-byte alignment for this atomic operation; target supports atomics up to 8 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw or: instruction alignment 16 is smaller than the required 32-byte alignment for this atomic operation; target supports atomics up to 8 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw and: instruction alignment 16 is smaller than the required 32-byte alignment for this atomic operation; target supports atomics up to 8 bytes, but this atomic accesses 32 bytes
|
||||
define void @bitwise_i256(ptr %0, i256 %1) {
|
||||
; CHECK: error: unsupported atomic load: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw xchg: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomic load: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw xor: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomic load: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw or: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomic load: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw and: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
entry:
|
||||
%2 = atomicrmw and ptr %0, i256 %1 monotonic, align 16
|
||||
%3 = atomicrmw or ptr %0, i256 %1 monotonic, align 16
|
||||
%4 = atomicrmw xor ptr %0, i256 %1 monotonic, align 16
|
||||
%5 = atomicrmw xchg ptr %0, i256 %1 monotonic, align 16
|
||||
%2 = atomicrmw and ptr %0, i256 %1 monotonic
|
||||
%3 = atomicrmw or ptr %0, i256 %1 monotonic
|
||||
%4 = atomicrmw xor ptr %0, i256 %1 monotonic
|
||||
%5 = atomicrmw xchg ptr %0, i256 %1 monotonic
|
||||
ret void
|
||||
}
|
||||
|
||||
; CHECK: error: unsupported atomicrmw umax: instruction alignment 16 is smaller than the required 32-byte alignment for this atomic operation; target supports atomics up to 8 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw umin: instruction alignment 16 is smaller than the required 32-byte alignment for this atomic operation; target supports atomics up to 8 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw max: instruction alignment 16 is smaller than the required 32-byte alignment for this atomic operation; target supports atomics up to 8 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw min: instruction alignment 16 is smaller than the required 32-byte alignment for this atomic operation; target supports atomics up to 8 bytes, but this atomic accesses 32 bytes
|
||||
define void @minmax_i256(ptr %0, i256 %1) {
|
||||
; CHECK: error: unsupported atomic load: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw umax: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomic load: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw umin: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomic load: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw max: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomic load: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
; CHECK: error: unsupported atomicrmw min: target supports atomics up to 16 bytes, but this atomic accesses 32 bytes
|
||||
entry:
|
||||
%2 = atomicrmw min ptr %0, i256 %1 monotonic, align 16
|
||||
%3 = atomicrmw max ptr %0, i256 %1 monotonic, align 16
|
||||
%4 = atomicrmw umin ptr %0, i256 %1 monotonic, align 16
|
||||
%5 = atomicrmw umax ptr %0, i256 %1 monotonic, align 16
|
||||
%2 = atomicrmw min ptr %0, i256 %1 monotonic
|
||||
%3 = atomicrmw max ptr %0, i256 %1 monotonic
|
||||
%4 = atomicrmw umin ptr %0, i256 %1 monotonic
|
||||
%5 = atomicrmw umax ptr %0, i256 %1 monotonic
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ define i8 @xchg_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r9, %r8, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r9;
|
||||
; SM60-NEXT: shl.b32 %r3, %r5, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: $L__BB0_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r10, %r13, %r2;
|
||||
@@ -58,7 +58,7 @@ define i16 @xchg_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: shl.b32 %r9, %r8, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r9;
|
||||
; SM60-NEXT: shl.b32 %r3, %r5, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: $L__BB1_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r10, %r13, %r2;
|
||||
@@ -126,7 +126,7 @@ define i8 @add_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB4_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -165,7 +165,7 @@ define i16 @add_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB5_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -235,7 +235,7 @@ define i8 @sub_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB8_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: sub.s32 %r10, %r15, %r4;
|
||||
@@ -274,7 +274,7 @@ define i16 @sub_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB9_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: sub.s32 %r10, %r15, %r4;
|
||||
@@ -433,7 +433,7 @@ define i8 @nand_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: $L__BB16_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -473,7 +473,7 @@ define i16 @nand_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: $L__BB17_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -505,7 +505,7 @@ define i32 @nand_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM60-NEXT: ld.param.b32 %r2, [nand_acq_rel_i32_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [nand_acq_rel_i32_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: $L__BB18_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -532,7 +532,7 @@ define i64 @nand_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [nand_acq_rel_i64_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [nand_acq_rel_i64_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd6, [%rd2];
|
||||
; SM60-NEXT: $L__BB19_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b64 %rd4, %rd6, %rd3;
|
||||
@@ -726,7 +726,7 @@ define i8 @max_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 255;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: cvt.s16.s8 %rs3, %rs1;
|
||||
; SM60-NEXT: $L__BB28_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -770,7 +770,7 @@ define i16 @max_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB29_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -843,7 +843,7 @@ define i8 @min_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 255;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: cvt.s16.s8 %rs3, %rs1;
|
||||
; SM60-NEXT: $L__BB32_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -887,7 +887,7 @@ define i16 @min_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB33_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -960,7 +960,7 @@ define i8 @umax_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 255;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB36_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1003,7 +1003,7 @@ define i16 @umax_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB37_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1076,7 +1076,7 @@ define i8 @umin_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 255;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB40_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1119,7 +1119,7 @@ define i16 @umin_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB41_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1192,7 +1192,7 @@ define i8 @uinc_wrap_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 255;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB44_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1238,7 +1238,7 @@ define i16 @uinc_wrap_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB45_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1289,7 +1289,7 @@ define i64 @uinc_wrap_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [uinc_wrap_acq_rel_i64_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [uinc_wrap_acq_rel_i64_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd6, [%rd2];
|
||||
; SM60-NEXT: $L__BB47_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: add.s64 %rd4, %rd6, 1;
|
||||
@@ -1326,7 +1326,7 @@ define i8 @udec_wrap_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 255;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB48_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1374,7 +1374,7 @@ define i16 @udec_wrap_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB49_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1427,7 +1427,7 @@ define i64 @udec_wrap_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [udec_wrap_acq_rel_i64_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [udec_wrap_acq_rel_i64_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd7, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd7, [%rd2];
|
||||
; SM60-NEXT: $L__BB51_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: add.s64 %rd4, %rd7, -1;
|
||||
@@ -1466,7 +1466,7 @@ define i8 @usub_cond_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 255;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB52_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1512,7 +1512,7 @@ define i16 @usub_cond_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB53_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1548,7 +1548,7 @@ define i32 @usub_cond_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM60-NEXT: ld.param.b32 %r2, [usub_cond_acq_rel_i32_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [usub_cond_acq_rel_i32_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: $L__BB54_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: setp.ge.u32 %p1, %r5, %r2;
|
||||
@@ -1576,7 +1576,7 @@ define i64 @usub_cond_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [usub_cond_acq_rel_i64_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [usub_cond_acq_rel_i64_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd6, [%rd2];
|
||||
; SM60-NEXT: $L__BB55_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: setp.ge.u64 %p1, %rd6, %rd3;
|
||||
@@ -1613,7 +1613,7 @@ define i8 @usub_sat_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 255;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB56_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1657,7 +1657,7 @@ define i16 @usub_sat_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB57_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1692,7 +1692,7 @@ define i32 @usub_sat_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM60-NEXT: ld.param.b32 %r2, [usub_sat_acq_rel_i32_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [usub_sat_acq_rel_i32_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: $L__BB58_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: max.u32 %r3, %r5, %r2;
|
||||
@@ -1719,7 +1719,7 @@ define i64 @usub_sat_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [usub_sat_acq_rel_i64_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [usub_sat_acq_rel_i64_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd6, [%rd2];
|
||||
; SM60-NEXT: $L__BB59_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: max.u64 %rd4, %rd6, %rd3;
|
||||
@@ -1763,7 +1763,7 @@ define float @fsub_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM60-NEXT: ld.param.b32 %r2, [fsub_acq_rel_float_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [fsub_acq_rel_float_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r4, [%rd1];
|
||||
; SM60-NEXT: $L__BB61_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: sub.rn.f32 %r3, %r4, %r2;
|
||||
@@ -1790,7 +1790,7 @@ define float @fmin_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM60-NEXT: ld.param.b32 %r2, [fmin_acq_rel_float_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [fmin_acq_rel_float_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r4, [%rd1];
|
||||
; SM60-NEXT: $L__BB62_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: min.f32 %r3, %r4, %r2;
|
||||
@@ -1817,7 +1817,7 @@ define float @fmax_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM60-NEXT: ld.param.b32 %r2, [fmax_acq_rel_float_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [fmax_acq_rel_float_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r4, [%rd1];
|
||||
; SM60-NEXT: $L__BB63_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: max.f32 %r3, %r4, %r2;
|
||||
@@ -1844,7 +1844,7 @@ define float @fminimum_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %v
|
||||
; SM60-NEXT: ld.param.b32 %r2, [fminimum_acq_rel_float_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [fminimum_acq_rel_float_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r8, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r8, [%rd1];
|
||||
; SM60-NEXT: setp.eq.b32 %p3, %r2, -2147483648;
|
||||
; SM60-NEXT: $L__BB64_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1879,7 +1879,7 @@ define float @fmaximum_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %v
|
||||
; SM60-NEXT: ld.param.b32 %r2, [fmaximum_acq_rel_float_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [fmaximum_acq_rel_float_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r8, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r8, [%rd1];
|
||||
; SM60-NEXT: setp.eq.b32 %p3, %r2, 0;
|
||||
; SM60-NEXT: $L__BB65_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1928,7 +1928,7 @@ define double @fsub_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [fsub_acq_rel_double_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [fsub_acq_rel_double_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd5, [%rd2];
|
||||
; SM60-NEXT: $L__BB67_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: sub.rn.f64 %rd4, %rd5, %rd3;
|
||||
@@ -1954,7 +1954,7 @@ define double @fmin_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [fmin_acq_rel_double_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [fmin_acq_rel_double_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd5, [%rd2];
|
||||
; SM60-NEXT: $L__BB68_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: min.f64 %rd4, %rd5, %rd3;
|
||||
@@ -1980,7 +1980,7 @@ define double @fmax_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [fmax_acq_rel_double_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [fmax_acq_rel_double_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd5, [%rd2];
|
||||
; SM60-NEXT: $L__BB69_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: max.f64 %rd4, %rd5, %rd3;
|
||||
@@ -2006,7 +2006,7 @@ define double @fminimum_acq_rel_double_global_cta(ptr addrspace(1) %addr, double
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [fminimum_acq_rel_double_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [fminimum_acq_rel_double_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd9, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd9, [%rd2];
|
||||
; SM60-NEXT: setp.eq.b64 %p3, %rd3, -9223372036854775808;
|
||||
; SM60-NEXT: $L__BB70_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2040,7 +2040,7 @@ define double @fmaximum_acq_rel_double_global_cta(ptr addrspace(1) %addr, double
|
||||
; SM60-NEXT: ld.param.b64 %rd3, [fmaximum_acq_rel_double_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd2, [fmaximum_acq_rel_double_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b64 %rd9, [%rd2];
|
||||
; SM60-NEXT: ld.volatile.global.b64 %rd9, [%rd2];
|
||||
; SM60-NEXT: setp.eq.b64 %p3, %rd3, 0;
|
||||
; SM60-NEXT: $L__BB71_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2083,7 +2083,7 @@ define half @fadd_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB72_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2125,7 +2125,7 @@ define half @fsub_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: $L__BB73_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2167,7 +2167,7 @@ define half @fmin_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r17, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r17, [%rd1];
|
||||
; SM60-NEXT: cvt.f32.f16 %r10, %rs1;
|
||||
; SM60-NEXT: $L__BB74_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2212,7 +2212,7 @@ define half @fmax_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r17, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r17, [%rd1];
|
||||
; SM60-NEXT: cvt.f32.f16 %r10, %rs1;
|
||||
; SM60-NEXT: $L__BB75_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2257,7 +2257,7 @@ define half @fminimum_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val)
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: setp.eq.b16 %p4, %rs1, -32768;
|
||||
; SM60-NEXT: $L__BB76_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2309,7 +2309,7 @@ define half @fmaximum_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val)
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r14, [%rd1];
|
||||
; SM60-NEXT: setp.eq.b16 %p4, %rs1, 0;
|
||||
; SM60-NEXT: $L__BB77_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2361,7 +2361,7 @@ define bfloat @fadd_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r23, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r23, [%rd1];
|
||||
; SM60-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM60-NEXT: $L__BB78_1: // %atomicrmw.start
|
||||
@@ -2411,7 +2411,7 @@ define bfloat @fsub_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r23, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r23, [%rd1];
|
||||
; SM60-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM60-NEXT: $L__BB79_1: // %atomicrmw.start
|
||||
@@ -2461,7 +2461,7 @@ define bfloat @fmin_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r23, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r23, [%rd1];
|
||||
; SM60-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM60-NEXT: $L__BB80_1: // %atomicrmw.start
|
||||
@@ -2511,7 +2511,7 @@ define bfloat @fmax_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r23, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r23, [%rd1];
|
||||
; SM60-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM60-NEXT: $L__BB81_1: // %atomicrmw.start
|
||||
@@ -2561,7 +2561,7 @@ define bfloat @fminimum_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r19, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r19, [%rd1];
|
||||
; SM60-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM60-NEXT: setp.eq.b16 %p4, %rs1, -32768;
|
||||
@@ -2617,7 +2617,7 @@ define bfloat @fmaximum_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat
|
||||
; SM60-NEXT: mov.b32 %r6, 65535;
|
||||
; SM60-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM60-NEXT: not.b32 %r2, %r7;
|
||||
; SM60-NEXT: ld.global.b32 %r19, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r19, [%rd1];
|
||||
; SM60-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM60-NEXT: setp.eq.b16 %p4, %rs1, 0;
|
||||
@@ -2672,7 +2672,7 @@ define i8 @add_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB84_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2709,7 +2709,7 @@ define i8 @add_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB85_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2748,7 +2748,7 @@ define i8 @add_release_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB86_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2786,7 +2786,7 @@ define i8 @add_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r15, [%rd1];
|
||||
; SM60-NEXT: $L__BB87_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2889,7 +2889,7 @@ define i8 @nand_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: $L__BB92_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2927,7 +2927,7 @@ define i8 @nand_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: $L__BB93_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2967,7 +2967,7 @@ define i8 @nand_release_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: $L__BB94_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -3006,7 +3006,7 @@ define i8 @nand_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM60-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM60-NEXT: not.b32 %r3, %r2;
|
||||
; SM60-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r16, [%rd1];
|
||||
; SM60-NEXT: $L__BB95_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -3037,7 +3037,7 @@ define i32 @nand_monotonic_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM60-NEXT: // %bb.0:
|
||||
; SM60-NEXT: ld.param.b32 %r2, [nand_monotonic_i32_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [nand_monotonic_i32_global_cta_param_0];
|
||||
; SM60-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: $L__BB96_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -3063,7 +3063,7 @@ define i32 @nand_acquire_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM60-NEXT: // %bb.0:
|
||||
; SM60-NEXT: ld.param.b32 %r2, [nand_acquire_i32_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [nand_acquire_i32_global_cta_param_0];
|
||||
; SM60-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: $L__BB97_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -3091,7 +3091,7 @@ define i32 @nand_release_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM60-NEXT: ld.param.b32 %r2, [nand_release_i32_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [nand_release_i32_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: $L__BB98_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -3118,7 +3118,7 @@ define i32 @nand_seq_cst_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM60-NEXT: ld.param.b32 %r2, [nand_seq_cst_i32_global_cta_param_1];
|
||||
; SM60-NEXT: ld.param.b64 %rd1, [nand_seq_cst_i32_global_cta_param_0];
|
||||
; SM60-NEXT: membar.cta;
|
||||
; SM60-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r5, [%rd1];
|
||||
; SM60-NEXT: $L__BB99_1: // %atomicrmw.start
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM60-NEXT: and.b32 %r3, %r5, %r2;
|
||||
|
||||
@@ -21,7 +21,7 @@ define i8 @xchg_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r9, %r8, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r9;
|
||||
; SM70-NEXT: shl.b32 %r3, %r5, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: $L__BB0_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r10, %r13, %r2;
|
||||
@@ -58,7 +58,7 @@ define i16 @xchg_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: shl.b32 %r9, %r8, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r9;
|
||||
; SM70-NEXT: shl.b32 %r3, %r5, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: $L__BB1_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r10, %r13, %r2;
|
||||
@@ -126,7 +126,7 @@ define i8 @add_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB4_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -165,7 +165,7 @@ define i16 @add_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB5_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -235,7 +235,7 @@ define i8 @sub_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB8_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: sub.s32 %r10, %r15, %r4;
|
||||
@@ -274,7 +274,7 @@ define i16 @sub_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB9_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: sub.s32 %r10, %r15, %r4;
|
||||
@@ -433,7 +433,7 @@ define i8 @nand_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: $L__BB16_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -473,7 +473,7 @@ define i16 @nand_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: $L__BB17_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -505,7 +505,7 @@ define i32 @nand_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM70-NEXT: ld.param.b32 %r2, [nand_acq_rel_i32_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [nand_acq_rel_i32_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: $L__BB18_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -532,7 +532,7 @@ define i64 @nand_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [nand_acq_rel_i64_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [nand_acq_rel_i64_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd6, [%rd2];
|
||||
; SM70-NEXT: $L__BB19_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b64 %rd4, %rd6, %rd3;
|
||||
@@ -726,7 +726,7 @@ define i8 @max_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 255;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: cvt.s16.s8 %rs3, %rs1;
|
||||
; SM70-NEXT: $L__BB28_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -770,7 +770,7 @@ define i16 @max_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB29_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -843,7 +843,7 @@ define i8 @min_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 255;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: cvt.s16.s8 %rs3, %rs1;
|
||||
; SM70-NEXT: $L__BB32_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -887,7 +887,7 @@ define i16 @min_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB33_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -960,7 +960,7 @@ define i8 @umax_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 255;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB36_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1003,7 +1003,7 @@ define i16 @umax_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB37_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1076,7 +1076,7 @@ define i8 @umin_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 255;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB40_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1119,7 +1119,7 @@ define i16 @umin_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB41_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1192,7 +1192,7 @@ define i8 @uinc_wrap_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 255;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB44_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1238,7 +1238,7 @@ define i16 @uinc_wrap_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB45_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1289,7 +1289,7 @@ define i64 @uinc_wrap_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [uinc_wrap_acq_rel_i64_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [uinc_wrap_acq_rel_i64_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd6, [%rd2];
|
||||
; SM70-NEXT: $L__BB47_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: add.s64 %rd4, %rd6, 1;
|
||||
@@ -1326,7 +1326,7 @@ define i8 @udec_wrap_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 255;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB48_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1374,7 +1374,7 @@ define i16 @udec_wrap_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB49_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1427,7 +1427,7 @@ define i64 @udec_wrap_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [udec_wrap_acq_rel_i64_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [udec_wrap_acq_rel_i64_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd7, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd7, [%rd2];
|
||||
; SM70-NEXT: $L__BB51_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: add.s64 %rd4, %rd7, -1;
|
||||
@@ -1466,7 +1466,7 @@ define i8 @usub_cond_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 255;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB52_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1512,7 +1512,7 @@ define i16 @usub_cond_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB53_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1548,7 +1548,7 @@ define i32 @usub_cond_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM70-NEXT: ld.param.b32 %r2, [usub_cond_acq_rel_i32_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [usub_cond_acq_rel_i32_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: $L__BB54_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: setp.ge.u32 %p1, %r5, %r2;
|
||||
@@ -1576,7 +1576,7 @@ define i64 @usub_cond_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [usub_cond_acq_rel_i64_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [usub_cond_acq_rel_i64_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd6, [%rd2];
|
||||
; SM70-NEXT: $L__BB55_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: setp.ge.u64 %p1, %rd6, %rd3;
|
||||
@@ -1613,7 +1613,7 @@ define i8 @usub_sat_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 255;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB56_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1657,7 +1657,7 @@ define i16 @usub_sat_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB57_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1692,7 +1692,7 @@ define i32 @usub_sat_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM70-NEXT: ld.param.b32 %r2, [usub_sat_acq_rel_i32_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [usub_sat_acq_rel_i32_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: $L__BB58_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: max.u32 %r3, %r5, %r2;
|
||||
@@ -1719,7 +1719,7 @@ define i64 @usub_sat_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [usub_sat_acq_rel_i64_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [usub_sat_acq_rel_i64_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd6, [%rd2];
|
||||
; SM70-NEXT: $L__BB59_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: max.u64 %rd4, %rd6, %rd3;
|
||||
@@ -1763,7 +1763,7 @@ define float @fsub_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM70-NEXT: ld.param.b32 %r2, [fsub_acq_rel_float_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [fsub_acq_rel_float_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r4, [%rd1];
|
||||
; SM70-NEXT: $L__BB61_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: sub.rn.f32 %r3, %r4, %r2;
|
||||
@@ -1790,7 +1790,7 @@ define float @fmin_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM70-NEXT: ld.param.b32 %r2, [fmin_acq_rel_float_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [fmin_acq_rel_float_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r4, [%rd1];
|
||||
; SM70-NEXT: $L__BB62_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: min.f32 %r3, %r4, %r2;
|
||||
@@ -1817,7 +1817,7 @@ define float @fmax_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM70-NEXT: ld.param.b32 %r2, [fmax_acq_rel_float_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [fmax_acq_rel_float_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r4, [%rd1];
|
||||
; SM70-NEXT: $L__BB63_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: max.f32 %r3, %r4, %r2;
|
||||
@@ -1844,7 +1844,7 @@ define float @fminimum_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %v
|
||||
; SM70-NEXT: ld.param.b32 %r2, [fminimum_acq_rel_float_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [fminimum_acq_rel_float_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r8, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r8, [%rd1];
|
||||
; SM70-NEXT: setp.eq.b32 %p3, %r2, -2147483648;
|
||||
; SM70-NEXT: $L__BB64_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1879,7 +1879,7 @@ define float @fmaximum_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %v
|
||||
; SM70-NEXT: ld.param.b32 %r2, [fmaximum_acq_rel_float_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [fmaximum_acq_rel_float_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r8, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r8, [%rd1];
|
||||
; SM70-NEXT: setp.eq.b32 %p3, %r2, 0;
|
||||
; SM70-NEXT: $L__BB65_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1928,7 +1928,7 @@ define double @fsub_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [fsub_acq_rel_double_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [fsub_acq_rel_double_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd5, [%rd2];
|
||||
; SM70-NEXT: $L__BB67_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: sub.rn.f64 %rd4, %rd5, %rd3;
|
||||
@@ -1954,7 +1954,7 @@ define double @fmin_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [fmin_acq_rel_double_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [fmin_acq_rel_double_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd5, [%rd2];
|
||||
; SM70-NEXT: $L__BB68_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: min.f64 %rd4, %rd5, %rd3;
|
||||
@@ -1980,7 +1980,7 @@ define double @fmax_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [fmax_acq_rel_double_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [fmax_acq_rel_double_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd5, [%rd2];
|
||||
; SM70-NEXT: $L__BB69_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: max.f64 %rd4, %rd5, %rd3;
|
||||
@@ -2006,7 +2006,7 @@ define double @fminimum_acq_rel_double_global_cta(ptr addrspace(1) %addr, double
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [fminimum_acq_rel_double_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [fminimum_acq_rel_double_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd9, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd9, [%rd2];
|
||||
; SM70-NEXT: setp.eq.b64 %p3, %rd3, -9223372036854775808;
|
||||
; SM70-NEXT: $L__BB70_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2040,7 +2040,7 @@ define double @fmaximum_acq_rel_double_global_cta(ptr addrspace(1) %addr, double
|
||||
; SM70-NEXT: ld.param.b64 %rd3, [fmaximum_acq_rel_double_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd2, [fmaximum_acq_rel_double_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b64 %rd9, [%rd2];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b64 %rd9, [%rd2];
|
||||
; SM70-NEXT: setp.eq.b64 %p3, %rd3, 0;
|
||||
; SM70-NEXT: $L__BB71_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2099,7 +2099,7 @@ define half @fsub_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: $L__BB73_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2141,7 +2141,7 @@ define half @fmin_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r17, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r17, [%rd1];
|
||||
; SM70-NEXT: cvt.f32.f16 %r10, %rs1;
|
||||
; SM70-NEXT: $L__BB74_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2186,7 +2186,7 @@ define half @fmax_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r17, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r17, [%rd1];
|
||||
; SM70-NEXT: cvt.f32.f16 %r10, %rs1;
|
||||
; SM70-NEXT: $L__BB75_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2231,7 +2231,7 @@ define half @fminimum_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val)
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: setp.eq.b16 %p4, %rs1, -32768;
|
||||
; SM70-NEXT: $L__BB76_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2283,7 +2283,7 @@ define half @fmaximum_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val)
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM70-NEXT: setp.eq.b16 %p4, %rs1, 0;
|
||||
; SM70-NEXT: $L__BB77_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2335,7 +2335,7 @@ define bfloat @fadd_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r23, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r23, [%rd1];
|
||||
; SM70-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM70-NEXT: $L__BB78_1: // %atomicrmw.start
|
||||
@@ -2385,7 +2385,7 @@ define bfloat @fsub_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r23, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r23, [%rd1];
|
||||
; SM70-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM70-NEXT: $L__BB79_1: // %atomicrmw.start
|
||||
@@ -2435,7 +2435,7 @@ define bfloat @fmin_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r23, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r23, [%rd1];
|
||||
; SM70-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM70-NEXT: $L__BB80_1: // %atomicrmw.start
|
||||
@@ -2485,7 +2485,7 @@ define bfloat @fmax_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r23, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r23, [%rd1];
|
||||
; SM70-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM70-NEXT: $L__BB81_1: // %atomicrmw.start
|
||||
@@ -2535,7 +2535,7 @@ define bfloat @fminimum_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r19, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r19, [%rd1];
|
||||
; SM70-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM70-NEXT: setp.eq.b16 %p4, %rs1, -32768;
|
||||
@@ -2591,7 +2591,7 @@ define bfloat @fmaximum_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat
|
||||
; SM70-NEXT: mov.b32 %r6, 65535;
|
||||
; SM70-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM70-NEXT: not.b32 %r2, %r7;
|
||||
; SM70-NEXT: ld.global.b32 %r19, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r19, [%rd1];
|
||||
; SM70-NEXT: cvt.u32.u16 %r10, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r11, %r10, 16;
|
||||
; SM70-NEXT: setp.eq.b16 %p4, %rs1, 0;
|
||||
@@ -2646,7 +2646,7 @@ define i8 @add_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB84_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2683,7 +2683,7 @@ define i8 @add_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB85_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2722,7 +2722,7 @@ define i8 @add_release_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB86_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2760,7 +2760,7 @@ define i8 @add_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM70-NEXT: $L__BB87_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2863,7 +2863,7 @@ define i8 @nand_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: $L__BB92_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2901,7 +2901,7 @@ define i8 @nand_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: $L__BB93_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2941,7 +2941,7 @@ define i8 @nand_release_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: $L__BB94_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2980,7 +2980,7 @@ define i8 @nand_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM70-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM70-NEXT: not.b32 %r3, %r2;
|
||||
; SM70-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM70-NEXT: $L__BB95_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -3011,7 +3011,7 @@ define i32 @nand_monotonic_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM70-NEXT: // %bb.0:
|
||||
; SM70-NEXT: ld.param.b32 %r2, [nand_monotonic_i32_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [nand_monotonic_i32_global_cta_param_0];
|
||||
; SM70-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: $L__BB96_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -3037,7 +3037,7 @@ define i32 @nand_acquire_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM70-NEXT: // %bb.0:
|
||||
; SM70-NEXT: ld.param.b32 %r2, [nand_acquire_i32_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [nand_acquire_i32_global_cta_param_0];
|
||||
; SM70-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: $L__BB97_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -3065,7 +3065,7 @@ define i32 @nand_release_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM70-NEXT: ld.param.b32 %r2, [nand_release_i32_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [nand_release_i32_global_cta_param_0];
|
||||
; SM70-NEXT: fence.acq_rel.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: $L__BB98_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -3092,7 +3092,7 @@ define i32 @nand_seq_cst_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM70-NEXT: ld.param.b32 %r2, [nand_seq_cst_i32_global_cta_param_1];
|
||||
; SM70-NEXT: ld.param.b64 %rd1, [nand_seq_cst_i32_global_cta_param_0];
|
||||
; SM70-NEXT: fence.sc.cta;
|
||||
; SM70-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM70-NEXT: $L__BB99_1: // %atomicrmw.start
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM70-NEXT: and.b32 %r3, %r5, %r2;
|
||||
|
||||
@@ -21,7 +21,7 @@ define i8 @xchg_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r9, %r8, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r9;
|
||||
; SM90-NEXT: shl.b32 %r3, %r5, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: $L__BB0_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r10, %r13, %r2;
|
||||
@@ -58,7 +58,7 @@ define i16 @xchg_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: shl.b32 %r9, %r8, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r9;
|
||||
; SM90-NEXT: shl.b32 %r3, %r5, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: $L__BB1_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r10, %r13, %r2;
|
||||
@@ -126,7 +126,7 @@ define i8 @add_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB4_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -165,7 +165,7 @@ define i16 @add_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB5_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -235,7 +235,7 @@ define i8 @sub_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB8_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: sub.s32 %r10, %r15, %r4;
|
||||
@@ -274,7 +274,7 @@ define i16 @sub_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB9_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: sub.s32 %r10, %r15, %r4;
|
||||
@@ -433,7 +433,7 @@ define i8 @nand_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: $L__BB16_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -473,7 +473,7 @@ define i16 @nand_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: $L__BB17_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -505,7 +505,7 @@ define i32 @nand_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM90-NEXT: ld.param.b32 %r2, [nand_acq_rel_i32_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [nand_acq_rel_i32_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: $L__BB18_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -532,7 +532,7 @@ define i64 @nand_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [nand_acq_rel_i64_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [nand_acq_rel_i64_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd6, [%rd2];
|
||||
; SM90-NEXT: $L__BB19_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b64 %rd4, %rd6, %rd3;
|
||||
@@ -726,7 +726,7 @@ define i8 @max_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 255;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: cvt.s16.s8 %rs3, %rs1;
|
||||
; SM90-NEXT: $L__BB28_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -770,7 +770,7 @@ define i16 @max_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB29_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -843,7 +843,7 @@ define i8 @min_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 255;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: cvt.s16.s8 %rs3, %rs1;
|
||||
; SM90-NEXT: $L__BB32_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -887,7 +887,7 @@ define i16 @min_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB33_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -960,7 +960,7 @@ define i8 @umax_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 255;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB36_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1003,7 +1003,7 @@ define i16 @umax_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB37_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1076,7 +1076,7 @@ define i8 @umin_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 255;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB40_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1119,7 +1119,7 @@ define i16 @umin_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB41_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1192,7 +1192,7 @@ define i8 @uinc_wrap_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 255;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB44_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1238,7 +1238,7 @@ define i16 @uinc_wrap_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB45_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1289,7 +1289,7 @@ define i64 @uinc_wrap_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [uinc_wrap_acq_rel_i64_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [uinc_wrap_acq_rel_i64_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd6, [%rd2];
|
||||
; SM90-NEXT: $L__BB47_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: add.s64 %rd4, %rd6, 1;
|
||||
@@ -1326,7 +1326,7 @@ define i8 @udec_wrap_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 255;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB48_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1374,7 +1374,7 @@ define i16 @udec_wrap_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB49_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1427,7 +1427,7 @@ define i64 @udec_wrap_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [udec_wrap_acq_rel_i64_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [udec_wrap_acq_rel_i64_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd7, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd7, [%rd2];
|
||||
; SM90-NEXT: $L__BB51_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: add.s64 %rd4, %rd7, -1;
|
||||
@@ -1466,7 +1466,7 @@ define i8 @usub_cond_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 255;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB52_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r15, %r1;
|
||||
@@ -1512,7 +1512,7 @@ define i16 @usub_cond_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB53_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1548,7 +1548,7 @@ define i32 @usub_cond_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM90-NEXT: ld.param.b32 %r2, [usub_cond_acq_rel_i32_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [usub_cond_acq_rel_i32_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: $L__BB54_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: setp.ge.u32 %p1, %r5, %r2;
|
||||
@@ -1576,7 +1576,7 @@ define i64 @usub_cond_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [usub_cond_acq_rel_i64_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [usub_cond_acq_rel_i64_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd6, [%rd2];
|
||||
; SM90-NEXT: $L__BB55_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: setp.ge.u64 %p1, %rd6, %rd3;
|
||||
@@ -1613,7 +1613,7 @@ define i8 @usub_sat_acq_rel_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 255;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB56_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1657,7 +1657,7 @@ define i16 @usub_sat_acq_rel_i16_global_cta(ptr addrspace(1) %addr, i16 %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB57_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -1692,7 +1692,7 @@ define i32 @usub_sat_acq_rel_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM90-NEXT: ld.param.b32 %r2, [usub_sat_acq_rel_i32_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [usub_sat_acq_rel_i32_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: $L__BB58_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: max.u32 %r3, %r5, %r2;
|
||||
@@ -1719,7 +1719,7 @@ define i64 @usub_sat_acq_rel_i64_global_cta(ptr addrspace(1) %addr, i64 %val) {
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [usub_sat_acq_rel_i64_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [usub_sat_acq_rel_i64_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd6, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd6, [%rd2];
|
||||
; SM90-NEXT: $L__BB59_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: max.u64 %rd4, %rd6, %rd3;
|
||||
@@ -1763,7 +1763,7 @@ define float @fsub_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM90-NEXT: ld.param.b32 %r2, [fsub_acq_rel_float_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [fsub_acq_rel_float_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: $L__BB61_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: sub.rn.f32 %r3, %r4, %r2;
|
||||
@@ -1790,7 +1790,7 @@ define float @fmin_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM90-NEXT: ld.param.b32 %r2, [fmin_acq_rel_float_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [fmin_acq_rel_float_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: $L__BB62_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: min.f32 %r3, %r4, %r2;
|
||||
@@ -1817,7 +1817,7 @@ define float @fmax_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %val)
|
||||
; SM90-NEXT: ld.param.b32 %r2, [fmax_acq_rel_float_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [fmax_acq_rel_float_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: $L__BB63_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: max.f32 %r3, %r4, %r2;
|
||||
@@ -1844,7 +1844,7 @@ define float @fminimum_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %v
|
||||
; SM90-NEXT: ld.param.b32 %r2, [fminimum_acq_rel_float_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [fminimum_acq_rel_float_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: $L__BB64_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: min.NaN.f32 %r3, %r4, %r2;
|
||||
@@ -1871,7 +1871,7 @@ define float @fmaximum_acq_rel_float_global_cta(ptr addrspace(1) %addr, float %v
|
||||
; SM90-NEXT: ld.param.b32 %r2, [fmaximum_acq_rel_float_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [fmaximum_acq_rel_float_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r4, [%rd1];
|
||||
; SM90-NEXT: $L__BB65_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: max.NaN.f32 %r3, %r4, %r2;
|
||||
@@ -1912,7 +1912,7 @@ define double @fsub_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [fsub_acq_rel_double_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [fsub_acq_rel_double_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd5, [%rd2];
|
||||
; SM90-NEXT: $L__BB67_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: sub.rn.f64 %rd4, %rd5, %rd3;
|
||||
@@ -1938,7 +1938,7 @@ define double @fmin_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [fmin_acq_rel_double_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [fmin_acq_rel_double_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd5, [%rd2];
|
||||
; SM90-NEXT: $L__BB68_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: min.f64 %rd4, %rd5, %rd3;
|
||||
@@ -1964,7 +1964,7 @@ define double @fmax_acq_rel_double_global_cta(ptr addrspace(1) %addr, double %va
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [fmax_acq_rel_double_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [fmax_acq_rel_double_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd5, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd5, [%rd2];
|
||||
; SM90-NEXT: $L__BB69_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: max.f64 %rd4, %rd5, %rd3;
|
||||
@@ -1990,7 +1990,7 @@ define double @fminimum_acq_rel_double_global_cta(ptr addrspace(1) %addr, double
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [fminimum_acq_rel_double_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [fminimum_acq_rel_double_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd9, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd9, [%rd2];
|
||||
; SM90-NEXT: setp.eq.b64 %p3, %rd3, -9223372036854775808;
|
||||
; SM90-NEXT: $L__BB70_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2024,7 +2024,7 @@ define double @fmaximum_acq_rel_double_global_cta(ptr addrspace(1) %addr, double
|
||||
; SM90-NEXT: ld.param.b64 %rd3, [fmaximum_acq_rel_double_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd2, [fmaximum_acq_rel_double_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b64 %rd9, [%rd2];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b64 %rd9, [%rd2];
|
||||
; SM90-NEXT: setp.eq.b64 %p3, %rd3, 0;
|
||||
; SM90-NEXT: $L__BB71_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2083,7 +2083,7 @@ define half @fsub_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB73_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2125,7 +2125,7 @@ define half @fmin_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB74_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2167,7 +2167,7 @@ define half @fmax_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val) {
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB75_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2209,7 +2209,7 @@ define half @fminimum_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val)
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB76_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2251,7 +2251,7 @@ define half @fmaximum_acq_rel_half_global_cta(ptr addrspace(1) %addr, half %val)
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB77_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2309,7 +2309,7 @@ define bfloat @fsub_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB79_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2351,7 +2351,7 @@ define bfloat @fmin_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB80_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2393,7 +2393,7 @@ define bfloat @fmax_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat %va
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB81_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2435,7 +2435,7 @@ define bfloat @fminimum_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB82_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2477,7 +2477,7 @@ define bfloat @fmaximum_acq_rel_bfloat_global_cta(ptr addrspace(1) %addr, bfloat
|
||||
; SM90-NEXT: mov.b32 %r6, 65535;
|
||||
; SM90-NEXT: shl.b32 %r7, %r6, %r1;
|
||||
; SM90-NEXT: not.b32 %r2, %r7;
|
||||
; SM90-NEXT: ld.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r14, [%rd1];
|
||||
; SM90-NEXT: $L__BB83_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: shr.u32 %r8, %r14, %r1;
|
||||
@@ -2518,7 +2518,7 @@ define i8 @add_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB84_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2555,7 +2555,7 @@ define i8 @add_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB85_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2594,7 +2594,7 @@ define i8 @add_release_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB86_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2632,7 +2632,7 @@ define i8 @add_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r15, [%rd1];
|
||||
; SM90-NEXT: $L__BB87_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: add.s32 %r10, %r15, %r4;
|
||||
@@ -2735,7 +2735,7 @@ define i8 @nand_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: $L__BB92_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2773,7 +2773,7 @@ define i8 @nand_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: $L__BB93_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2813,7 +2813,7 @@ define i8 @nand_release_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: $L__BB94_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2852,7 +2852,7 @@ define i8 @nand_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %val) {
|
||||
; SM90-NEXT: shl.b32 %r2, %r9, %r1;
|
||||
; SM90-NEXT: not.b32 %r3, %r2;
|
||||
; SM90-NEXT: shl.b32 %r4, %r6, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r16, [%rd1];
|
||||
; SM90-NEXT: $L__BB95_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r10, %r16, %r4;
|
||||
@@ -2883,7 +2883,7 @@ define i32 @nand_monotonic_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM90-NEXT: // %bb.0:
|
||||
; SM90-NEXT: ld.param.b32 %r2, [nand_monotonic_i32_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [nand_monotonic_i32_global_cta_param_0];
|
||||
; SM90-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: $L__BB96_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -2909,7 +2909,7 @@ define i32 @nand_acquire_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM90-NEXT: // %bb.0:
|
||||
; SM90-NEXT: ld.param.b32 %r2, [nand_acquire_i32_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [nand_acquire_i32_global_cta_param_0];
|
||||
; SM90-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: $L__BB97_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -2937,7 +2937,7 @@ define i32 @nand_release_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM90-NEXT: ld.param.b32 %r2, [nand_release_i32_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [nand_release_i32_global_cta_param_0];
|
||||
; SM90-NEXT: fence.release.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: $L__BB98_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r3, %r5, %r2;
|
||||
@@ -2964,7 +2964,7 @@ define i32 @nand_seq_cst_i32_global_cta(ptr addrspace(1) %addr, i32 %val) {
|
||||
; SM90-NEXT: ld.param.b32 %r2, [nand_seq_cst_i32_global_cta_param_1];
|
||||
; SM90-NEXT: ld.param.b64 %rd1, [nand_seq_cst_i32_global_cta_param_0];
|
||||
; SM90-NEXT: fence.sc.cta;
|
||||
; SM90-NEXT: ld.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r5, [%rd1];
|
||||
; SM90-NEXT: $L__BB99_1: // %atomicrmw.start
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; SM90-NEXT: and.b32 %r3, %r5, %r2;
|
||||
|
||||
@@ -748,29 +748,36 @@ define i128 @test_atomicrmw_and(ptr %ptr, i128 %val) {
|
||||
; CHECK-LABEL: test_atomicrmw_and(
|
||||
; CHECK: {
|
||||
; CHECK-NEXT: .reg .pred %p<2>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<13>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<14>;
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: // %bb.0:
|
||||
; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [test_atomicrmw_and_param_1];
|
||||
; CHECK-NEXT: ld.param.b64 %rd3, [test_atomicrmw_and_param_0];
|
||||
; CHECK-NEXT: ld.v2.b64 {%rd11, %rd12}, [%rd3];
|
||||
; CHECK-NEXT: $L__BB34_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: and.b64 %rd6, %rd11, %rd4;
|
||||
; CHECK-NEXT: and.b64 %rd7, %rd12, %rd5;
|
||||
; CHECK-NEXT: mov.b64 %rd6, 0;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd11, %rd12};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd7};
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd6, %rd6};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd6};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd12, %rd13}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: $L__BB34_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: and.b64 %rd7, %rd12, %rd4;
|
||||
; CHECK-NEXT: and.b64 %rd8, %rd13, %rd5;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd12, %rd13};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd7, %rd8};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd1, %rd2}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: xor.b64 %rd8, %rd2, %rd12;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd1, %rd11;
|
||||
; CHECK-NEXT: or.b64 %rd10, %rd9, %rd8;
|
||||
; CHECK-NEXT: setp.ne.b64 %p1, %rd10, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd11, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd2;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd2, %rd13;
|
||||
; CHECK-NEXT: xor.b64 %rd10, %rd1, %rd12;
|
||||
; CHECK-NEXT: or.b64 %rd11, %rd10, %rd9;
|
||||
; CHECK-NEXT: setp.ne.b64 %p1, %rd11, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd13, %rd2;
|
||||
; CHECK-NEXT: @%p1 bra $L__BB34_1;
|
||||
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
|
||||
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd2};
|
||||
@@ -783,29 +790,36 @@ define i128 @test_atomicrmw_or(ptr %ptr, i128 %val) {
|
||||
; CHECK-LABEL: test_atomicrmw_or(
|
||||
; CHECK: {
|
||||
; CHECK-NEXT: .reg .pred %p<2>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<13>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<14>;
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: // %bb.0:
|
||||
; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [test_atomicrmw_or_param_1];
|
||||
; CHECK-NEXT: ld.param.b64 %rd3, [test_atomicrmw_or_param_0];
|
||||
; CHECK-NEXT: ld.v2.b64 {%rd11, %rd12}, [%rd3];
|
||||
; CHECK-NEXT: $L__BB35_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: or.b64 %rd6, %rd11, %rd4;
|
||||
; CHECK-NEXT: or.b64 %rd7, %rd12, %rd5;
|
||||
; CHECK-NEXT: mov.b64 %rd6, 0;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd11, %rd12};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd7};
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd6, %rd6};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd6};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd12, %rd13}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: $L__BB35_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: or.b64 %rd7, %rd12, %rd4;
|
||||
; CHECK-NEXT: or.b64 %rd8, %rd13, %rd5;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd12, %rd13};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd7, %rd8};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd1, %rd2}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: xor.b64 %rd8, %rd2, %rd12;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd1, %rd11;
|
||||
; CHECK-NEXT: or.b64 %rd10, %rd9, %rd8;
|
||||
; CHECK-NEXT: setp.ne.b64 %p1, %rd10, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd11, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd2;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd2, %rd13;
|
||||
; CHECK-NEXT: xor.b64 %rd10, %rd1, %rd12;
|
||||
; CHECK-NEXT: or.b64 %rd11, %rd10, %rd9;
|
||||
; CHECK-NEXT: setp.ne.b64 %p1, %rd11, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd13, %rd2;
|
||||
; CHECK-NEXT: @%p1 bra $L__BB35_1;
|
||||
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
|
||||
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd2};
|
||||
@@ -818,29 +832,36 @@ define i128 @test_atomicrmw_xor(ptr %ptr, i128 %val) {
|
||||
; CHECK-LABEL: test_atomicrmw_xor(
|
||||
; CHECK: {
|
||||
; CHECK-NEXT: .reg .pred %p<2>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<13>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<14>;
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: // %bb.0:
|
||||
; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [test_atomicrmw_xor_param_1];
|
||||
; CHECK-NEXT: ld.param.b64 %rd3, [test_atomicrmw_xor_param_0];
|
||||
; CHECK-NEXT: ld.v2.b64 {%rd11, %rd12}, [%rd3];
|
||||
; CHECK-NEXT: $L__BB36_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: xor.b64 %rd6, %rd11, %rd4;
|
||||
; CHECK-NEXT: xor.b64 %rd7, %rd12, %rd5;
|
||||
; CHECK-NEXT: mov.b64 %rd6, 0;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd11, %rd12};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd7};
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd6, %rd6};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd6};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd12, %rd13}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: $L__BB36_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: xor.b64 %rd7, %rd12, %rd4;
|
||||
; CHECK-NEXT: xor.b64 %rd8, %rd13, %rd5;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd12, %rd13};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd7, %rd8};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd1, %rd2}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: xor.b64 %rd8, %rd2, %rd12;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd1, %rd11;
|
||||
; CHECK-NEXT: or.b64 %rd10, %rd9, %rd8;
|
||||
; CHECK-NEXT: setp.ne.b64 %p1, %rd10, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd11, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd2;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd2, %rd13;
|
||||
; CHECK-NEXT: xor.b64 %rd10, %rd1, %rd12;
|
||||
; CHECK-NEXT: or.b64 %rd11, %rd10, %rd9;
|
||||
; CHECK-NEXT: setp.ne.b64 %p1, %rd11, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd13, %rd2;
|
||||
; CHECK-NEXT: @%p1 bra $L__BB36_1;
|
||||
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
|
||||
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd2};
|
||||
@@ -853,34 +874,41 @@ define i128 @test_atomicrmw_min(ptr %ptr, i128 %val) {
|
||||
; CHECK-LABEL: test_atomicrmw_min(
|
||||
; CHECK: {
|
||||
; CHECK-NEXT: .reg .pred %p<7>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<13>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<14>;
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: // %bb.0:
|
||||
; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [test_atomicrmw_min_param_1];
|
||||
; CHECK-NEXT: ld.param.b64 %rd3, [test_atomicrmw_min_param_0];
|
||||
; CHECK-NEXT: ld.v2.b64 {%rd11, %rd12}, [%rd3];
|
||||
; CHECK-NEXT: $L__BB37_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: setp.lt.u64 %p1, %rd11, %rd4;
|
||||
; CHECK-NEXT: setp.eq.b64 %p2, %rd12, %rd5;
|
||||
; CHECK-NEXT: and.pred %p3, %p2, %p1;
|
||||
; CHECK-NEXT: setp.lt.s64 %p4, %rd12, %rd5;
|
||||
; CHECK-NEXT: or.pred %p5, %p3, %p4;
|
||||
; CHECK-NEXT: selp.b64 %rd6, %rd12, %rd5, %p5;
|
||||
; CHECK-NEXT: selp.b64 %rd7, %rd11, %rd4, %p5;
|
||||
; CHECK-NEXT: mov.b64 %rd6, 0;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd11, %rd12};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd7, %rd6};
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd6, %rd6};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd6};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd12, %rd13}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: $L__BB37_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: setp.lt.u64 %p1, %rd12, %rd4;
|
||||
; CHECK-NEXT: setp.eq.b64 %p2, %rd13, %rd5;
|
||||
; CHECK-NEXT: and.pred %p3, %p2, %p1;
|
||||
; CHECK-NEXT: setp.lt.s64 %p4, %rd13, %rd5;
|
||||
; CHECK-NEXT: or.pred %p5, %p3, %p4;
|
||||
; CHECK-NEXT: selp.b64 %rd7, %rd13, %rd5, %p5;
|
||||
; CHECK-NEXT: selp.b64 %rd8, %rd12, %rd4, %p5;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd12, %rd13};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd8, %rd7};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd1, %rd2}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: xor.b64 %rd8, %rd2, %rd12;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd1, %rd11;
|
||||
; CHECK-NEXT: or.b64 %rd10, %rd9, %rd8;
|
||||
; CHECK-NEXT: setp.ne.b64 %p6, %rd10, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd11, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd2;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd2, %rd13;
|
||||
; CHECK-NEXT: xor.b64 %rd10, %rd1, %rd12;
|
||||
; CHECK-NEXT: or.b64 %rd11, %rd10, %rd9;
|
||||
; CHECK-NEXT: setp.ne.b64 %p6, %rd11, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd13, %rd2;
|
||||
; CHECK-NEXT: @%p6 bra $L__BB37_1;
|
||||
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
|
||||
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd2};
|
||||
@@ -893,34 +921,41 @@ define i128 @test_atomicrmw_max(ptr %ptr, i128 %val) {
|
||||
; CHECK-LABEL: test_atomicrmw_max(
|
||||
; CHECK: {
|
||||
; CHECK-NEXT: .reg .pred %p<7>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<13>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<14>;
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: // %bb.0:
|
||||
; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [test_atomicrmw_max_param_1];
|
||||
; CHECK-NEXT: ld.param.b64 %rd3, [test_atomicrmw_max_param_0];
|
||||
; CHECK-NEXT: ld.v2.b64 {%rd11, %rd12}, [%rd3];
|
||||
; CHECK-NEXT: $L__BB38_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: setp.gt.u64 %p1, %rd11, %rd4;
|
||||
; CHECK-NEXT: setp.eq.b64 %p2, %rd12, %rd5;
|
||||
; CHECK-NEXT: and.pred %p3, %p2, %p1;
|
||||
; CHECK-NEXT: setp.gt.s64 %p4, %rd12, %rd5;
|
||||
; CHECK-NEXT: or.pred %p5, %p3, %p4;
|
||||
; CHECK-NEXT: selp.b64 %rd6, %rd12, %rd5, %p5;
|
||||
; CHECK-NEXT: selp.b64 %rd7, %rd11, %rd4, %p5;
|
||||
; CHECK-NEXT: mov.b64 %rd6, 0;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd11, %rd12};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd7, %rd6};
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd6, %rd6};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd6};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd12, %rd13}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: $L__BB38_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: setp.gt.u64 %p1, %rd12, %rd4;
|
||||
; CHECK-NEXT: setp.eq.b64 %p2, %rd13, %rd5;
|
||||
; CHECK-NEXT: and.pred %p3, %p2, %p1;
|
||||
; CHECK-NEXT: setp.gt.s64 %p4, %rd13, %rd5;
|
||||
; CHECK-NEXT: or.pred %p5, %p3, %p4;
|
||||
; CHECK-NEXT: selp.b64 %rd7, %rd13, %rd5, %p5;
|
||||
; CHECK-NEXT: selp.b64 %rd8, %rd12, %rd4, %p5;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd12, %rd13};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd8, %rd7};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd1, %rd2}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: xor.b64 %rd8, %rd2, %rd12;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd1, %rd11;
|
||||
; CHECK-NEXT: or.b64 %rd10, %rd9, %rd8;
|
||||
; CHECK-NEXT: setp.ne.b64 %p6, %rd10, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd11, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd2;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd2, %rd13;
|
||||
; CHECK-NEXT: xor.b64 %rd10, %rd1, %rd12;
|
||||
; CHECK-NEXT: or.b64 %rd11, %rd10, %rd9;
|
||||
; CHECK-NEXT: setp.ne.b64 %p6, %rd11, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd13, %rd2;
|
||||
; CHECK-NEXT: @%p6 bra $L__BB38_1;
|
||||
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
|
||||
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd2};
|
||||
@@ -933,34 +968,41 @@ define i128 @test_atomicrmw_umin(ptr %ptr, i128 %val) {
|
||||
; CHECK-LABEL: test_atomicrmw_umin(
|
||||
; CHECK: {
|
||||
; CHECK-NEXT: .reg .pred %p<7>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<13>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<14>;
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: // %bb.0:
|
||||
; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [test_atomicrmw_umin_param_1];
|
||||
; CHECK-NEXT: ld.param.b64 %rd3, [test_atomicrmw_umin_param_0];
|
||||
; CHECK-NEXT: ld.v2.b64 {%rd11, %rd12}, [%rd3];
|
||||
; CHECK-NEXT: $L__BB39_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: setp.lt.u64 %p1, %rd11, %rd4;
|
||||
; CHECK-NEXT: setp.eq.b64 %p2, %rd12, %rd5;
|
||||
; CHECK-NEXT: and.pred %p3, %p2, %p1;
|
||||
; CHECK-NEXT: setp.lt.u64 %p4, %rd12, %rd5;
|
||||
; CHECK-NEXT: or.pred %p5, %p3, %p4;
|
||||
; CHECK-NEXT: selp.b64 %rd6, %rd12, %rd5, %p5;
|
||||
; CHECK-NEXT: selp.b64 %rd7, %rd11, %rd4, %p5;
|
||||
; CHECK-NEXT: mov.b64 %rd6, 0;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd11, %rd12};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd7, %rd6};
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd6, %rd6};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd6};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd12, %rd13}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: $L__BB39_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: setp.lt.u64 %p1, %rd12, %rd4;
|
||||
; CHECK-NEXT: setp.eq.b64 %p2, %rd13, %rd5;
|
||||
; CHECK-NEXT: and.pred %p3, %p2, %p1;
|
||||
; CHECK-NEXT: setp.lt.u64 %p4, %rd13, %rd5;
|
||||
; CHECK-NEXT: or.pred %p5, %p3, %p4;
|
||||
; CHECK-NEXT: selp.b64 %rd7, %rd13, %rd5, %p5;
|
||||
; CHECK-NEXT: selp.b64 %rd8, %rd12, %rd4, %p5;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd12, %rd13};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd8, %rd7};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd1, %rd2}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: xor.b64 %rd8, %rd2, %rd12;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd1, %rd11;
|
||||
; CHECK-NEXT: or.b64 %rd10, %rd9, %rd8;
|
||||
; CHECK-NEXT: setp.ne.b64 %p6, %rd10, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd11, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd2;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd2, %rd13;
|
||||
; CHECK-NEXT: xor.b64 %rd10, %rd1, %rd12;
|
||||
; CHECK-NEXT: or.b64 %rd11, %rd10, %rd9;
|
||||
; CHECK-NEXT: setp.ne.b64 %p6, %rd11, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd13, %rd2;
|
||||
; CHECK-NEXT: @%p6 bra $L__BB39_1;
|
||||
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
|
||||
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd2};
|
||||
@@ -973,34 +1015,41 @@ define i128 @test_atomicrmw_umax(ptr %ptr, i128 %val) {
|
||||
; CHECK-LABEL: test_atomicrmw_umax(
|
||||
; CHECK: {
|
||||
; CHECK-NEXT: .reg .pred %p<7>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<13>;
|
||||
; CHECK-NEXT: .reg .b64 %rd<14>;
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: // %bb.0:
|
||||
; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [test_atomicrmw_umax_param_1];
|
||||
; CHECK-NEXT: ld.param.b64 %rd3, [test_atomicrmw_umax_param_0];
|
||||
; CHECK-NEXT: ld.v2.b64 {%rd11, %rd12}, [%rd3];
|
||||
; CHECK-NEXT: $L__BB40_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: setp.gt.u64 %p1, %rd11, %rd4;
|
||||
; CHECK-NEXT: setp.eq.b64 %p2, %rd12, %rd5;
|
||||
; CHECK-NEXT: and.pred %p3, %p2, %p1;
|
||||
; CHECK-NEXT: setp.gt.u64 %p4, %rd12, %rd5;
|
||||
; CHECK-NEXT: or.pred %p5, %p3, %p4;
|
||||
; CHECK-NEXT: selp.b64 %rd6, %rd12, %rd5, %p5;
|
||||
; CHECK-NEXT: selp.b64 %rd7, %rd11, %rd4, %p5;
|
||||
; CHECK-NEXT: mov.b64 %rd6, 0;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd11, %rd12};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd7, %rd6};
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd6, %rd6};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd6, %rd6};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd12, %rd13}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: $L__BB40_1: // %atomicrmw.start
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: setp.gt.u64 %p1, %rd12, %rd4;
|
||||
; CHECK-NEXT: setp.eq.b64 %p2, %rd13, %rd5;
|
||||
; CHECK-NEXT: and.pred %p3, %p2, %p1;
|
||||
; CHECK-NEXT: setp.gt.u64 %p4, %rd13, %rd5;
|
||||
; CHECK-NEXT: or.pred %p5, %p3, %p4;
|
||||
; CHECK-NEXT: selp.b64 %rd7, %rd13, %rd5, %p5;
|
||||
; CHECK-NEXT: selp.b64 %rd8, %rd12, %rd4, %p5;
|
||||
; CHECK-NEXT: {
|
||||
; CHECK-NEXT: .reg .b128 cmp, swap, dst;
|
||||
; CHECK-NEXT: mov.b128 cmp, {%rd12, %rd13};
|
||||
; CHECK-NEXT: mov.b128 swap, {%rd8, %rd7};
|
||||
; CHECK-NEXT: atom.relaxed.sys.cas.b128 dst, [%rd3], cmp, swap;
|
||||
; CHECK-NEXT: mov.b128 {%rd1, %rd2}, dst;
|
||||
; CHECK-NEXT: }
|
||||
; CHECK-NEXT: xor.b64 %rd8, %rd2, %rd12;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd1, %rd11;
|
||||
; CHECK-NEXT: or.b64 %rd10, %rd9, %rd8;
|
||||
; CHECK-NEXT: setp.ne.b64 %p6, %rd10, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd11, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd2;
|
||||
; CHECK-NEXT: xor.b64 %rd9, %rd2, %rd13;
|
||||
; CHECK-NEXT: xor.b64 %rd10, %rd1, %rd12;
|
||||
; CHECK-NEXT: or.b64 %rd11, %rd10, %rd9;
|
||||
; CHECK-NEXT: setp.ne.b64 %p6, %rd11, 0;
|
||||
; CHECK-NEXT: mov.b64 %rd12, %rd1;
|
||||
; CHECK-NEXT: mov.b64 %rd13, %rd2;
|
||||
; CHECK-NEXT: @%p6 bra $L__BB40_1;
|
||||
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
|
||||
; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd2};
|
||||
|
||||
@@ -25,7 +25,7 @@ define i8 @monotonic_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB0_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -69,7 +69,7 @@ define i8 @monotonic_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB1_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -115,7 +115,7 @@ define i8 @monotonic_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB2_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -160,7 +160,7 @@ define i8 @acquire_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB3_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -205,7 +205,7 @@ define i8 @acquire_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB4_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -251,7 +251,7 @@ define i8 @acquire_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB5_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -297,7 +297,7 @@ define i8 @release_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB6_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -342,7 +342,7 @@ define i8 @release_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB7_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -388,7 +388,7 @@ define i8 @release_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB8_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -434,7 +434,7 @@ define i8 @acq_rel_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB9_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -480,7 +480,7 @@ define i8 @acq_rel_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB10_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -526,7 +526,7 @@ define i8 @acq_rel_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB11_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -572,7 +572,7 @@ define i8 @seq_cst_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB12_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -618,7 +618,7 @@ define i8 @seq_cst_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB13_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -664,7 +664,7 @@ define i8 @seq_cst_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB14_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -709,7 +709,7 @@ define i16 @monotonic_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp,
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB15_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -753,7 +753,7 @@ define i16 @monotonic_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB16_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -799,7 +799,7 @@ define i16 @monotonic_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB17_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -844,7 +844,7 @@ define i16 @acquire_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB18_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -889,7 +889,7 @@ define i16 @acquire_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB19_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -935,7 +935,7 @@ define i16 @acquire_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB20_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -981,7 +981,7 @@ define i16 @release_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB21_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1026,7 +1026,7 @@ define i16 @release_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB22_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1072,7 +1072,7 @@ define i16 @release_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB23_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1118,7 +1118,7 @@ define i16 @acq_rel_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB24_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1164,7 +1164,7 @@ define i16 @acq_rel_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB25_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1210,7 +1210,7 @@ define i16 @acq_rel_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB26_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1256,7 +1256,7 @@ define i16 @seq_cst_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB27_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1302,7 +1302,7 @@ define i16 @seq_cst_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB28_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1348,7 +1348,7 @@ define i16 @seq_cst_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB29_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1903,7 +1903,7 @@ define i8 @acq_rel_acquire_i8_global(ptr addrspace(1) %addr, i8 %cmp, i8 %new) {
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.global.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB60_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2000,7 +2000,7 @@ define i8 @acq_rel_acquire_i8_generic_cta(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB64_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2046,7 +2046,7 @@ define i8 @acq_rel_acquire_i8_shared_cta(ptr addrspace(3) %addr, i8 %cmp, i8 %ne
|
||||
; SM60-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM60-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM60-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM60-NEXT: ld.shared.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: ld.volatile.shared.b32 %r13, [%rd1];
|
||||
; SM60-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM60-NEXT: $L__BB65_1: // %partword.cmpxchg.loop
|
||||
; SM60-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
|
||||
@@ -25,7 +25,7 @@ define i8 @monotonic_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB0_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -69,7 +69,7 @@ define i8 @monotonic_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB1_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -115,7 +115,7 @@ define i8 @monotonic_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB2_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -160,7 +160,7 @@ define i8 @acquire_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB3_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -205,7 +205,7 @@ define i8 @acquire_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB4_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -251,7 +251,7 @@ define i8 @acquire_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB5_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -297,7 +297,7 @@ define i8 @release_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB6_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -342,7 +342,7 @@ define i8 @release_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB7_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -388,7 +388,7 @@ define i8 @release_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB8_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -434,7 +434,7 @@ define i8 @acq_rel_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB9_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -480,7 +480,7 @@ define i8 @acq_rel_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB10_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -526,7 +526,7 @@ define i8 @acq_rel_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB11_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -572,7 +572,7 @@ define i8 @seq_cst_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB12_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -618,7 +618,7 @@ define i8 @seq_cst_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB13_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -664,7 +664,7 @@ define i8 @seq_cst_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB14_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -709,7 +709,7 @@ define i16 @monotonic_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp,
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB15_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -753,7 +753,7 @@ define i16 @monotonic_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB16_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -799,7 +799,7 @@ define i16 @monotonic_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB17_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -844,7 +844,7 @@ define i16 @acquire_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB18_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -889,7 +889,7 @@ define i16 @acquire_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB19_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -935,7 +935,7 @@ define i16 @acquire_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB20_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -981,7 +981,7 @@ define i16 @release_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB21_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1026,7 +1026,7 @@ define i16 @release_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB22_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1072,7 +1072,7 @@ define i16 @release_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB23_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1118,7 +1118,7 @@ define i16 @acq_rel_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB24_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1164,7 +1164,7 @@ define i16 @acq_rel_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB25_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1210,7 +1210,7 @@ define i16 @acq_rel_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB26_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1256,7 +1256,7 @@ define i16 @seq_cst_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB27_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1302,7 +1302,7 @@ define i16 @seq_cst_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB28_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1348,7 +1348,7 @@ define i16 @seq_cst_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB29_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1903,7 +1903,7 @@ define i8 @acq_rel_acquire_i8_global(ptr addrspace(1) %addr, i8 %cmp, i8 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.global.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB60_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2000,7 +2000,7 @@ define i8 @acq_rel_acquire_i8_generic_cta(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB64_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2046,7 +2046,7 @@ define i8 @acq_rel_acquire_i8_shared_cta(ptr addrspace(3) %addr, i8 %cmp, i8 %ne
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.shared.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.cta.shared.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB65_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
|
||||
@@ -25,7 +25,7 @@ define i8 @monotonic_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB0_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -69,7 +69,7 @@ define i8 @monotonic_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB1_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -115,7 +115,7 @@ define i8 @monotonic_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB2_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -160,7 +160,7 @@ define i8 @acquire_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB3_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -205,7 +205,7 @@ define i8 @acquire_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB4_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -251,7 +251,7 @@ define i8 @acquire_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB5_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -297,7 +297,7 @@ define i8 @release_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB6_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -342,7 +342,7 @@ define i8 @release_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB7_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -388,7 +388,7 @@ define i8 @release_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB8_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -434,7 +434,7 @@ define i8 @acq_rel_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB9_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -480,7 +480,7 @@ define i8 @acq_rel_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB10_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -526,7 +526,7 @@ define i8 @acq_rel_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB11_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -572,7 +572,7 @@ define i8 @seq_cst_monotonic_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB12_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -618,7 +618,7 @@ define i8 @seq_cst_acquire_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB13_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -664,7 +664,7 @@ define i8 @seq_cst_seq_cst_i8_global_cta(ptr addrspace(1) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB14_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -709,7 +709,7 @@ define i16 @monotonic_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp,
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB15_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -753,7 +753,7 @@ define i16 @monotonic_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB16_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -799,7 +799,7 @@ define i16 @monotonic_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB17_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -844,7 +844,7 @@ define i16 @acquire_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB18_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -889,7 +889,7 @@ define i16 @acquire_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB19_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -935,7 +935,7 @@ define i16 @acquire_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB20_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -981,7 +981,7 @@ define i16 @release_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB21_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1026,7 +1026,7 @@ define i16 @release_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB22_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1072,7 +1072,7 @@ define i16 @release_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB23_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1118,7 +1118,7 @@ define i16 @acq_rel_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB24_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1164,7 +1164,7 @@ define i16 @acq_rel_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB25_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1210,7 +1210,7 @@ define i16 @acq_rel_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB26_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1256,7 +1256,7 @@ define i16 @seq_cst_monotonic_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB27_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1302,7 +1302,7 @@ define i16 @seq_cst_acquire_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB28_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1348,7 +1348,7 @@ define i16 @seq_cst_seq_cst_i16_global_cta(ptr addrspace(1) %addr, i16 %cmp, i16
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB29_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1903,7 +1903,7 @@ define i8 @acq_rel_acquire_i8_global(ptr addrspace(1) %addr, i8 %cmp, i8 %new) {
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.sys.global.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB60_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2017,7 +2017,7 @@ define i8 @acq_rel_acquire_i8_generic_cta(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB65_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -2063,7 +2063,7 @@ define i8 @acq_rel_acquire_i8_shared_cta(ptr addrspace(3) %addr, i8 %cmp, i8 %ne
|
||||
; SM90-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM90-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM90-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM90-NEXT: ld.shared.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: ld.relaxed.cta.shared.b32 %r13, [%rd1];
|
||||
; SM90-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM90-NEXT: $L__BB66_1: // %partword.cmpxchg.loop
|
||||
; SM90-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
|
||||
@@ -31,7 +31,7 @@ define i8 @relaxed_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB0_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -71,7 +71,7 @@ define i8 @relaxed_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB0_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -155,7 +155,7 @@ define i8 @acquire_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB1_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -196,7 +196,7 @@ define i8 @acquire_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB1_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -283,7 +283,7 @@ define i8 @release_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB2_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -324,7 +324,7 @@ define i8 @release_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB2_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -410,7 +410,7 @@ define i8 @acq_rel_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB3_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -452,7 +452,7 @@ define i8 @acq_rel_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB3_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -540,7 +540,7 @@ define i8 @seq_cst_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB4_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -582,7 +582,7 @@ define i8 @seq_cst_sys_i8(ptr %addr, i8 %cmp, i8 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB4_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -670,7 +670,7 @@ define i16 @relaxed_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB5_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -710,7 +710,7 @@ define i16 @relaxed_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB5_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -793,7 +793,7 @@ define i16 @acquire_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB6_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -834,7 +834,7 @@ define i16 @acquire_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB6_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -920,7 +920,7 @@ define i16 @release_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB7_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -961,7 +961,7 @@ define i16 @release_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB7_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1046,7 +1046,7 @@ define i16 @acq_rel_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB8_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1088,7 +1088,7 @@ define i16 @acq_rel_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB8_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1176,7 +1176,7 @@ define i16 @seq_cst_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM30-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM30-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM30-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM30-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: ld.volatile.b32 %r13, [%rd1];
|
||||
; SM30-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM30-NEXT: $L__BB9_1: // %partword.cmpxchg.loop
|
||||
; SM30-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -1218,7 +1218,7 @@ define i16 @seq_cst_sys_i16(ptr %addr, i16 %cmp, i16 %new) {
|
||||
; SM70-NEXT: cvt.u32.u16 %r12, %rs1;
|
||||
; SM70-NEXT: shl.b32 %r3, %r12, %r1;
|
||||
; SM70-NEXT: shl.b32 %r4, %r7, %r1;
|
||||
; SM70-NEXT: ld.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: ld.relaxed.sys.b32 %r13, [%rd1];
|
||||
; SM70-NEXT: and.b32 %r16, %r13, %r2;
|
||||
; SM70-NEXT: $L__BB9_1: // %partword.cmpxchg.loop
|
||||
; SM70-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
|
||||
@@ -208,7 +208,7 @@ define void @test_distributed_shared_cluster_cmpxchg(ptr addrspace(7) %dsmem_ptr
|
||||
; CHECK-NEXT: not.b32 %r2, %r26;
|
||||
; CHECK-NEXT: mov.b32 %r27, 1;
|
||||
; CHECK-NEXT: shl.b32 %r3, %r27, %r1;
|
||||
; CHECK-NEXT: ld.shared::cluster.b32 %r28, [%rd1];
|
||||
; CHECK-NEXT: ld.relaxed.sys.shared::cluster.b32 %r28, [%rd1];
|
||||
; CHECK-NEXT: and.b32 %r38, %r28, %r2;
|
||||
; CHECK-NEXT: $L__BB4_1: // %partword.cmpxchg.loop33
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -223,7 +223,7 @@ define void @test_distributed_shared_cluster_cmpxchg(ptr addrspace(7) %dsmem_ptr
|
||||
; CHECK-NEXT: mov.b32 %r38, %r5;
|
||||
; CHECK-NEXT: @%p2 bra $L__BB4_1;
|
||||
; CHECK-NEXT: $L__BB4_3: // %partword.cmpxchg.end31
|
||||
; CHECK-NEXT: ld.shared::cluster.b32 %r30, [%rd1];
|
||||
; CHECK-NEXT: ld.relaxed.sys.shared::cluster.b32 %r30, [%rd1];
|
||||
; CHECK-NEXT: and.b32 %r39, %r30, %r2;
|
||||
; CHECK-NEXT: $L__BB4_4: // %partword.cmpxchg.loop23
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -240,7 +240,7 @@ define void @test_distributed_shared_cluster_cmpxchg(ptr addrspace(7) %dsmem_ptr
|
||||
; CHECK-NEXT: $L__BB4_6: // %partword.cmpxchg.end21
|
||||
; CHECK-NEXT: fence.acq_rel.sys;
|
||||
; CHECK-NEXT: fence.acq_rel.sys;
|
||||
; CHECK-NEXT: ld.shared::cluster.b32 %r32, [%rd1];
|
||||
; CHECK-NEXT: ld.relaxed.sys.shared::cluster.b32 %r32, [%rd1];
|
||||
; CHECK-NEXT: and.b32 %r40, %r32, %r2;
|
||||
; CHECK-NEXT: $L__BB4_7: // %partword.cmpxchg.loop13
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -256,7 +256,7 @@ define void @test_distributed_shared_cluster_cmpxchg(ptr addrspace(7) %dsmem_ptr
|
||||
; CHECK-NEXT: @%p6 bra $L__BB4_7;
|
||||
; CHECK-NEXT: $L__BB4_9: // %partword.cmpxchg.end11
|
||||
; CHECK-NEXT: fence.acq_rel.sys;
|
||||
; CHECK-NEXT: ld.shared::cluster.b32 %r34, [%rd1];
|
||||
; CHECK-NEXT: ld.relaxed.sys.shared::cluster.b32 %r34, [%rd1];
|
||||
; CHECK-NEXT: and.b32 %r41, %r34, %r2;
|
||||
; CHECK-NEXT: $L__BB4_10: // %partword.cmpxchg.loop3
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
@@ -273,7 +273,7 @@ define void @test_distributed_shared_cluster_cmpxchg(ptr addrspace(7) %dsmem_ptr
|
||||
; CHECK-NEXT: $L__BB4_12: // %partword.cmpxchg.end1
|
||||
; CHECK-NEXT: fence.acq_rel.sys;
|
||||
; CHECK-NEXT: fence.sc.sys;
|
||||
; CHECK-NEXT: ld.shared::cluster.b32 %r36, [%rd1];
|
||||
; CHECK-NEXT: ld.relaxed.sys.shared::cluster.b32 %r36, [%rd1];
|
||||
; CHECK-NEXT: and.b32 %r42, %r36, %r2;
|
||||
; CHECK-NEXT: $L__BB4_13: // %partword.cmpxchg.loop
|
||||
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
|
||||
|
||||
@@ -907,11 +907,17 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
|
||||
; AIX32-NEXT: nop
|
||||
; AIX32-NEXT: lwz 31, L..C8(2) # @u128
|
||||
; AIX32-NEXT: addi 30, 1, 72
|
||||
; AIX32-NEXT: li 3, 16
|
||||
; AIX32-NEXT: mr 5, 30
|
||||
; AIX32-NEXT: li 6, 0
|
||||
; AIX32-NEXT: mr 4, 31
|
||||
; AIX32-NEXT: bl .__atomic_load[PR]
|
||||
; AIX32-NEXT: nop
|
||||
; AIX32-NEXT: lwz 5, 84(1)
|
||||
; AIX32-NEXT: lwz 4, 80(1)
|
||||
; AIX32-NEXT: lwz 6, 76(1)
|
||||
; AIX32-NEXT: lwz 7, 72(1)
|
||||
; AIX32-NEXT: addi 29, 1, 56
|
||||
; AIX32-NEXT: lwz 5, 12(31)
|
||||
; AIX32-NEXT: lwz 4, 8(31)
|
||||
; AIX32-NEXT: lwz 6, 4(31)
|
||||
; AIX32-NEXT: lwz 7, 0(31)
|
||||
; AIX32-NEXT: .align 4
|
||||
; AIX32-NEXT: L..BB0_49: # %atomicrmw.start2
|
||||
; AIX32-NEXT: #
|
||||
@@ -941,11 +947,17 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
|
||||
; AIX32-NEXT: # %bb.50: # %atomicrmw.end1
|
||||
; AIX32-NEXT: lwz 31, L..C9(2) # @s128
|
||||
; AIX32-NEXT: addi 30, 1, 72
|
||||
; AIX32-NEXT: li 3, 16
|
||||
; AIX32-NEXT: mr 5, 30
|
||||
; AIX32-NEXT: li 6, 0
|
||||
; AIX32-NEXT: mr 4, 31
|
||||
; AIX32-NEXT: bl .__atomic_load[PR]
|
||||
; AIX32-NEXT: nop
|
||||
; AIX32-NEXT: lwz 5, 84(1)
|
||||
; AIX32-NEXT: lwz 4, 80(1)
|
||||
; AIX32-NEXT: lwz 6, 76(1)
|
||||
; AIX32-NEXT: lwz 7, 72(1)
|
||||
; AIX32-NEXT: addi 29, 1, 56
|
||||
; AIX32-NEXT: lwz 5, 12(31)
|
||||
; AIX32-NEXT: lwz 4, 8(31)
|
||||
; AIX32-NEXT: lwz 6, 4(31)
|
||||
; AIX32-NEXT: lwz 7, 0(31)
|
||||
; AIX32-NEXT: .align 4
|
||||
; AIX32-NEXT: L..BB0_51: # %atomicrmw.start
|
||||
; AIX32-NEXT: #
|
||||
@@ -3853,15 +3865,21 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
|
||||
; AIX32-NEXT: nop
|
||||
; AIX32-NEXT: nand 3, 4, 29
|
||||
; AIX32-NEXT: lwz 29, L..C8(2) # @u128
|
||||
; AIX32-NEXT: lbz 23, 0(26)
|
||||
; AIX32-NEXT: addi 28, 1, 80
|
||||
; AIX32-NEXT: addi 27, 1, 64
|
||||
; AIX32-NEXT: lbz 23, 0(26)
|
||||
; AIX32-NEXT: li 6, 0
|
||||
; AIX32-NEXT: stw 17, 0(30)
|
||||
; AIX32-NEXT: lwz 4, 12(29)
|
||||
; AIX32-NEXT: lwz 5, 8(29)
|
||||
; AIX32-NEXT: lwz 6, 4(29)
|
||||
; AIX32-NEXT: lwz 7, 0(29)
|
||||
; AIX32-NEXT: mr 5, 28
|
||||
; AIX32-NEXT: stw 3, 4(30)
|
||||
; AIX32-NEXT: li 3, 16
|
||||
; AIX32-NEXT: mr 4, 29
|
||||
; AIX32-NEXT: bl .__atomic_load[PR]
|
||||
; AIX32-NEXT: nop
|
||||
; AIX32-NEXT: lwz 4, 92(1)
|
||||
; AIX32-NEXT: lwz 5, 88(1)
|
||||
; AIX32-NEXT: lwz 6, 84(1)
|
||||
; AIX32-NEXT: lwz 7, 80(1)
|
||||
; AIX32-NEXT: addi 27, 1, 64
|
||||
; AIX32-NEXT: .align 4
|
||||
; AIX32-NEXT: L..BB2_61: # %atomicrmw.start2
|
||||
; AIX32-NEXT: #
|
||||
@@ -3891,19 +3909,25 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
|
||||
; AIX32-NEXT: beq 0, L..BB2_61
|
||||
; AIX32-NEXT: # %bb.62: # %atomicrmw.end1
|
||||
; AIX32-NEXT: and 3, 4, 23
|
||||
; AIX32-NEXT: lwz 28, L..C9(2) # @s128
|
||||
; AIX32-NEXT: stw 17, 0(29)
|
||||
; AIX32-NEXT: lbz 23, 0(26)
|
||||
; AIX32-NEXT: stw 17, 4(29)
|
||||
; AIX32-NEXT: stw 17, 8(29)
|
||||
; AIX32-NEXT: xor 3, 3, 17
|
||||
; AIX32-NEXT: addi 28, 1, 80
|
||||
; AIX32-NEXT: addi 27, 1, 64
|
||||
; AIX32-NEXT: lbz 23, 0(26)
|
||||
; AIX32-NEXT: li 6, 0
|
||||
; AIX32-NEXT: mr 4, 28
|
||||
; AIX32-NEXT: stw 3, 12(29)
|
||||
; AIX32-NEXT: lwz 29, L..C9(2) # @s128
|
||||
; AIX32-NEXT: lwz 4, 12(29)
|
||||
; AIX32-NEXT: lwz 5, 8(29)
|
||||
; AIX32-NEXT: lwz 6, 4(29)
|
||||
; AIX32-NEXT: lwz 7, 0(29)
|
||||
; AIX32-NEXT: addi 29, 1, 80
|
||||
; AIX32-NEXT: mr 5, 29
|
||||
; AIX32-NEXT: li 3, 16
|
||||
; AIX32-NEXT: bl .__atomic_load[PR]
|
||||
; AIX32-NEXT: nop
|
||||
; AIX32-NEXT: lwz 4, 92(1)
|
||||
; AIX32-NEXT: lwz 5, 88(1)
|
||||
; AIX32-NEXT: lwz 6, 84(1)
|
||||
; AIX32-NEXT: lwz 7, 80(1)
|
||||
; AIX32-NEXT: addi 27, 1, 64
|
||||
; AIX32-NEXT: .align 4
|
||||
; AIX32-NEXT: L..BB2_63: # %atomicrmw.start
|
||||
; AIX32-NEXT: #
|
||||
@@ -3915,8 +3939,8 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
|
||||
; AIX32-NEXT: stw 6, 84(1)
|
||||
; AIX32-NEXT: stw 5, 88(1)
|
||||
; AIX32-NEXT: stw 4, 92(1)
|
||||
; AIX32-NEXT: mr 4, 29
|
||||
; AIX32-NEXT: mr 5, 28
|
||||
; AIX32-NEXT: mr 4, 28
|
||||
; AIX32-NEXT: mr 5, 29
|
||||
; AIX32-NEXT: mr 6, 27
|
||||
; AIX32-NEXT: stw 3, 76(1)
|
||||
; AIX32-NEXT: li 3, 16
|
||||
@@ -3935,11 +3959,11 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
|
||||
; AIX32-NEXT: and 3, 4, 23
|
||||
; AIX32-NEXT: li 5, 255
|
||||
; AIX32-NEXT: xor 3, 3, 17
|
||||
; AIX32-NEXT: stw 17, 0(29)
|
||||
; AIX32-NEXT: stw 17, 4(29)
|
||||
; AIX32-NEXT: stw 17, 8(29)
|
||||
; AIX32-NEXT: stw 17, 0(28)
|
||||
; AIX32-NEXT: stw 17, 4(28)
|
||||
; AIX32-NEXT: stw 17, 8(28)
|
||||
; AIX32-NEXT: slw 5, 5, 24
|
||||
; AIX32-NEXT: stw 3, 12(29)
|
||||
; AIX32-NEXT: stw 3, 12(28)
|
||||
; AIX32-NEXT: lbz 3, 0(26)
|
||||
; AIX32-NEXT: sync
|
||||
; AIX32-NEXT: slw 4, 3, 24
|
||||
@@ -5884,41 +5908,43 @@ define dso_local i64 @atommax8(ptr nocapture noundef %ptr, i64 noundef %val) loc
|
||||
; AIX32-NEXT: stwu 1, -80(1)
|
||||
; AIX32-NEXT: stw 0, 88(1)
|
||||
; AIX32-NEXT: stw 30, 72(1) # 4-byte Folded Spill
|
||||
; AIX32-NEXT: mr 30, 4
|
||||
; AIX32-NEXT: li 4, 0
|
||||
; AIX32-NEXT: stw 28, 64(1) # 4-byte Folded Spill
|
||||
; AIX32-NEXT: stw 29, 68(1) # 4-byte Folded Spill
|
||||
; AIX32-NEXT: stw 31, 76(1) # 4-byte Folded Spill
|
||||
; AIX32-NEXT: mr 31, 5
|
||||
; AIX32-NEXT: mr 30, 4
|
||||
; AIX32-NEXT: lwz 4, 4(3)
|
||||
; AIX32-NEXT: lwz 5, 0(3)
|
||||
; AIX32-NEXT: stw 28, 64(1) # 4-byte Folded Spill
|
||||
; AIX32-NEXT: addi 28, 1, 56
|
||||
; AIX32-NEXT: stw 29, 68(1) # 4-byte Folded Spill
|
||||
; AIX32-NEXT: mr 29, 3
|
||||
; AIX32-NEXT: bl .__atomic_load_8[PR]
|
||||
; AIX32-NEXT: nop
|
||||
; AIX32-NEXT: addi 28, 1, 56
|
||||
; AIX32-NEXT: .align 4
|
||||
; AIX32-NEXT: L..BB7_1: # %atomicrmw.start
|
||||
; AIX32-NEXT: #
|
||||
; AIX32-NEXT: cmplw 5, 30
|
||||
; AIX32-NEXT: cmpw 1, 5, 30
|
||||
; AIX32-NEXT: cmplw 3, 30
|
||||
; AIX32-NEXT: cmpw 1, 3, 30
|
||||
; AIX32-NEXT: li 7, 5
|
||||
; AIX32-NEXT: li 8, 5
|
||||
; AIX32-NEXT: stw 5, 56(1)
|
||||
; AIX32-NEXT: mr 3, 29
|
||||
; AIX32-NEXT: stw 3, 56(1)
|
||||
; AIX32-NEXT: crandc 20, 5, 2
|
||||
; AIX32-NEXT: cmplw 1, 4, 31
|
||||
; AIX32-NEXT: crand 21, 2, 5
|
||||
; AIX32-NEXT: stw 4, 60(1)
|
||||
; AIX32-NEXT: cror 20, 21, 20
|
||||
; AIX32-NEXT: isel 5, 5, 30, 20
|
||||
; AIX32-NEXT: isel 5, 3, 30, 20
|
||||
; AIX32-NEXT: isel 6, 4, 31, 20
|
||||
; AIX32-NEXT: mr 3, 29
|
||||
; AIX32-NEXT: mr 4, 28
|
||||
; AIX32-NEXT: bl .__atomic_compare_exchange_8[PR]
|
||||
; AIX32-NEXT: nop
|
||||
; AIX32-NEXT: mr 5, 3
|
||||
; AIX32-NEXT: lwz 4, 60(1)
|
||||
; AIX32-NEXT: lwz 5, 56(1)
|
||||
; AIX32-NEXT: cmplwi 3, 0
|
||||
; AIX32-NEXT: lwz 3, 56(1)
|
||||
; AIX32-NEXT: cmplwi 5, 0
|
||||
; AIX32-NEXT: beq 0, L..BB7_1
|
||||
; AIX32-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; AIX32-NEXT: cmplw 5, 30
|
||||
; AIX32-NEXT: cmpw 1, 5, 30
|
||||
; AIX32-NEXT: cmplw 3, 30
|
||||
; AIX32-NEXT: cmpw 1, 3, 30
|
||||
; AIX32-NEXT: li 3, 55
|
||||
; AIX32-NEXT: lwz 30, 72(1) # 4-byte Folded Reload
|
||||
; AIX32-NEXT: lwz 29, 68(1) # 4-byte Folded Reload
|
||||
|
||||
@@ -8,15 +8,17 @@ define float @test_add(ptr %ptr, float %incr) {
|
||||
; CHECK-64-LABEL: test_add:
|
||||
; CHECK-64: # %bb.0: # %entry
|
||||
; CHECK-64-NEXT: sync
|
||||
; CHECK-64-NEXT: lfs 0, 0(3)
|
||||
; CHECK-64-NEXT: lwz 4, 0(3)
|
||||
; CHECK-64-NEXT: stw 4, -4(1)
|
||||
; CHECK-64-NEXT: lfs 0, -4(1)
|
||||
; CHECK-64-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
; CHECK-64-NEXT: # =>This Loop Header: Depth=1
|
||||
; CHECK-64-NEXT: # Child Loop BB0_2 Depth 2
|
||||
; CHECK-64-NEXT: fadds 2, 0, 1
|
||||
; CHECK-64-NEXT: stfs 2, -4(1)
|
||||
; CHECK-64-NEXT: stfs 0, -8(1)
|
||||
; CHECK-64-NEXT: lwz 5, -4(1)
|
||||
; CHECK-64-NEXT: lwz 6, -8(1)
|
||||
; CHECK-64-NEXT: stfs 2, -8(1)
|
||||
; CHECK-64-NEXT: stfs 0, -12(1)
|
||||
; CHECK-64-NEXT: lwz 5, -8(1)
|
||||
; CHECK-64-NEXT: lwz 6, -12(1)
|
||||
; CHECK-64-NEXT: .LBB0_2: # %cmpxchg.start
|
||||
; CHECK-64-NEXT: # Parent Loop BB0_1 Depth=1
|
||||
; CHECK-64-NEXT: # => This Inner Loop Header: Depth=2
|
||||
@@ -30,8 +32,8 @@ define float @test_add(ptr %ptr, float %incr) {
|
||||
; CHECK-64-NEXT: bne- 0, .LBB0_2
|
||||
; CHECK-64-NEXT: .LBB0_4: # %cmpxchg.end
|
||||
; CHECK-64-NEXT: #
|
||||
; CHECK-64-NEXT: stw 4, -12(1)
|
||||
; CHECK-64-NEXT: lfs 0, -12(1)
|
||||
; CHECK-64-NEXT: stw 4, -16(1)
|
||||
; CHECK-64-NEXT: lfs 0, -16(1)
|
||||
; CHECK-64-NEXT: bc 4, 20, .LBB0_1
|
||||
; CHECK-64-NEXT: b .LBB0_6
|
||||
; CHECK-64-NEXT: .LBB0_5: # %cmpxchg.nostore
|
||||
@@ -48,15 +50,17 @@ define float @test_add(ptr %ptr, float %incr) {
|
||||
; CHECK-32-NEXT: stwu 1, -32(1)
|
||||
; CHECK-32-NEXT: .cfi_def_cfa_offset 32
|
||||
; CHECK-32-NEXT: sync
|
||||
; CHECK-32-NEXT: lfs 0, 0(3)
|
||||
; CHECK-32-NEXT: lwz 4, 0(3)
|
||||
; CHECK-32-NEXT: stw 4, 28(1)
|
||||
; CHECK-32-NEXT: lfs 0, 28(1)
|
||||
; CHECK-32-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
; CHECK-32-NEXT: # =>This Loop Header: Depth=1
|
||||
; CHECK-32-NEXT: # Child Loop BB0_2 Depth 2
|
||||
; CHECK-32-NEXT: fadds 2, 0, 1
|
||||
; CHECK-32-NEXT: stfs 2, 28(1)
|
||||
; CHECK-32-NEXT: stfs 0, 24(1)
|
||||
; CHECK-32-NEXT: lwz 5, 28(1)
|
||||
; CHECK-32-NEXT: lwz 6, 24(1)
|
||||
; CHECK-32-NEXT: stfs 2, 24(1)
|
||||
; CHECK-32-NEXT: stfs 0, 20(1)
|
||||
; CHECK-32-NEXT: lwz 5, 24(1)
|
||||
; CHECK-32-NEXT: lwz 6, 20(1)
|
||||
; CHECK-32-NEXT: .LBB0_2: # %cmpxchg.start
|
||||
; CHECK-32-NEXT: # Parent Loop BB0_1 Depth=1
|
||||
; CHECK-32-NEXT: # => This Inner Loop Header: Depth=2
|
||||
@@ -70,8 +74,8 @@ define float @test_add(ptr %ptr, float %incr) {
|
||||
; CHECK-32-NEXT: bne- 0, .LBB0_2
|
||||
; CHECK-32-NEXT: .LBB0_4: # %cmpxchg.end
|
||||
; CHECK-32-NEXT: #
|
||||
; CHECK-32-NEXT: stw 4, 20(1)
|
||||
; CHECK-32-NEXT: lfs 0, 20(1)
|
||||
; CHECK-32-NEXT: stw 4, 16(1)
|
||||
; CHECK-32-NEXT: lfs 0, 16(1)
|
||||
; CHECK-32-NEXT: bc 4, 20, .LBB0_1
|
||||
; CHECK-32-NEXT: b .LBB0_6
|
||||
; CHECK-32-NEXT: .LBB0_5: # %cmpxchg.nostore
|
||||
|
||||
@@ -195,24 +195,29 @@ define i128 @add(ptr %a, i128 %x) {
|
||||
; PPC-PWR8-NEXT: .cfi_offset r28, -16
|
||||
; PPC-PWR8-NEXT: .cfi_offset r29, -12
|
||||
; PPC-PWR8-NEXT: .cfi_offset r30, -8
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r26, 56(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: mr r26, r3
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r28, 64(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r28, r6
|
||||
; PPC-PWR8-NEXT: lwz r6, 12(r3)
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: li r3, 16
|
||||
; PPC-PWR8-NEXT: mr r4, r26
|
||||
; PPC-PWR8-NEXT: mr r5, r25
|
||||
; PPC-PWR8-NEXT: li r6, 0
|
||||
; PPC-PWR8-NEXT: stw r24, 48(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: lwz r5, 8(r3)
|
||||
; PPC-PWR8-NEXT: lwz r4, 4(r3)
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: lwz r3, 0(r3)
|
||||
; PPC-PWR8-NEXT: stw r29, 68(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: stw r30, 72(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r30, r8
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: bl __atomic_load
|
||||
; PPC-PWR8-NEXT: lwz r6, 44(r1)
|
||||
; PPC-PWR8-NEXT: lwz r5, 40(r1)
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: lwz r4, 36(r1)
|
||||
; PPC-PWR8-NEXT: lwz r3, 32(r1)
|
||||
; PPC-PWR8-NEXT: .p2align 4
|
||||
; PPC-PWR8-NEXT: .LBB1_1: # %atomicrmw.start
|
||||
; PPC-PWR8-NEXT: #
|
||||
@@ -337,24 +342,29 @@ define i128 @sub(ptr %a, i128 %x) {
|
||||
; PPC-PWR8-NEXT: .cfi_offset r28, -16
|
||||
; PPC-PWR8-NEXT: .cfi_offset r29, -12
|
||||
; PPC-PWR8-NEXT: .cfi_offset r30, -8
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r26, 56(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: mr r26, r3
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r28, 64(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r28, r6
|
||||
; PPC-PWR8-NEXT: lwz r6, 12(r3)
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: li r3, 16
|
||||
; PPC-PWR8-NEXT: mr r4, r26
|
||||
; PPC-PWR8-NEXT: mr r5, r25
|
||||
; PPC-PWR8-NEXT: li r6, 0
|
||||
; PPC-PWR8-NEXT: stw r24, 48(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: lwz r5, 8(r3)
|
||||
; PPC-PWR8-NEXT: lwz r4, 4(r3)
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: lwz r3, 0(r3)
|
||||
; PPC-PWR8-NEXT: stw r29, 68(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: stw r30, 72(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r30, r8
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: bl __atomic_load
|
||||
; PPC-PWR8-NEXT: lwz r6, 44(r1)
|
||||
; PPC-PWR8-NEXT: lwz r5, 40(r1)
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: lwz r4, 36(r1)
|
||||
; PPC-PWR8-NEXT: lwz r3, 32(r1)
|
||||
; PPC-PWR8-NEXT: .p2align 4
|
||||
; PPC-PWR8-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; PPC-PWR8-NEXT: #
|
||||
@@ -479,24 +489,29 @@ define i128 @and(ptr %a, i128 %x) {
|
||||
; PPC-PWR8-NEXT: .cfi_offset r28, -16
|
||||
; PPC-PWR8-NEXT: .cfi_offset r29, -12
|
||||
; PPC-PWR8-NEXT: .cfi_offset r30, -8
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r26, 56(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: mr r26, r3
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r28, 64(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r28, r6
|
||||
; PPC-PWR8-NEXT: lwz r6, 12(r3)
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: li r3, 16
|
||||
; PPC-PWR8-NEXT: mr r4, r26
|
||||
; PPC-PWR8-NEXT: mr r5, r25
|
||||
; PPC-PWR8-NEXT: li r6, 0
|
||||
; PPC-PWR8-NEXT: stw r24, 48(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: lwz r5, 8(r3)
|
||||
; PPC-PWR8-NEXT: lwz r4, 4(r3)
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: lwz r3, 0(r3)
|
||||
; PPC-PWR8-NEXT: stw r29, 68(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: stw r30, 72(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r30, r8
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: bl __atomic_load
|
||||
; PPC-PWR8-NEXT: lwz r6, 44(r1)
|
||||
; PPC-PWR8-NEXT: lwz r5, 40(r1)
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: lwz r4, 36(r1)
|
||||
; PPC-PWR8-NEXT: lwz r3, 32(r1)
|
||||
; PPC-PWR8-NEXT: .p2align 4
|
||||
; PPC-PWR8-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
; PPC-PWR8-NEXT: #
|
||||
@@ -621,24 +636,29 @@ define i128 @or(ptr %a, i128 %x) {
|
||||
; PPC-PWR8-NEXT: .cfi_offset r28, -16
|
||||
; PPC-PWR8-NEXT: .cfi_offset r29, -12
|
||||
; PPC-PWR8-NEXT: .cfi_offset r30, -8
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r26, 56(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: mr r26, r3
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r28, 64(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r28, r6
|
||||
; PPC-PWR8-NEXT: lwz r6, 12(r3)
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: li r3, 16
|
||||
; PPC-PWR8-NEXT: mr r4, r26
|
||||
; PPC-PWR8-NEXT: mr r5, r25
|
||||
; PPC-PWR8-NEXT: li r6, 0
|
||||
; PPC-PWR8-NEXT: stw r24, 48(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: lwz r5, 8(r3)
|
||||
; PPC-PWR8-NEXT: lwz r4, 4(r3)
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: lwz r3, 0(r3)
|
||||
; PPC-PWR8-NEXT: stw r29, 68(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: stw r30, 72(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r30, r8
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: bl __atomic_load
|
||||
; PPC-PWR8-NEXT: lwz r6, 44(r1)
|
||||
; PPC-PWR8-NEXT: lwz r5, 40(r1)
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: lwz r4, 36(r1)
|
||||
; PPC-PWR8-NEXT: lwz r3, 32(r1)
|
||||
; PPC-PWR8-NEXT: .p2align 4
|
||||
; PPC-PWR8-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; PPC-PWR8-NEXT: #
|
||||
@@ -763,24 +783,29 @@ define i128 @xor(ptr %a, i128 %x) {
|
||||
; PPC-PWR8-NEXT: .cfi_offset r28, -16
|
||||
; PPC-PWR8-NEXT: .cfi_offset r29, -12
|
||||
; PPC-PWR8-NEXT: .cfi_offset r30, -8
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r26, 56(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: mr r26, r3
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r28, 64(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r28, r6
|
||||
; PPC-PWR8-NEXT: lwz r6, 12(r3)
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: li r3, 16
|
||||
; PPC-PWR8-NEXT: mr r4, r26
|
||||
; PPC-PWR8-NEXT: mr r5, r25
|
||||
; PPC-PWR8-NEXT: li r6, 0
|
||||
; PPC-PWR8-NEXT: stw r24, 48(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: lwz r5, 8(r3)
|
||||
; PPC-PWR8-NEXT: lwz r4, 4(r3)
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: lwz r3, 0(r3)
|
||||
; PPC-PWR8-NEXT: stw r29, 68(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: stw r30, 72(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r30, r8
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: bl __atomic_load
|
||||
; PPC-PWR8-NEXT: lwz r6, 44(r1)
|
||||
; PPC-PWR8-NEXT: lwz r5, 40(r1)
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: lwz r4, 36(r1)
|
||||
; PPC-PWR8-NEXT: lwz r3, 32(r1)
|
||||
; PPC-PWR8-NEXT: .p2align 4
|
||||
; PPC-PWR8-NEXT: .LBB5_1: # %atomicrmw.start
|
||||
; PPC-PWR8-NEXT: #
|
||||
@@ -905,24 +930,29 @@ define i128 @nand(ptr %a, i128 %x) {
|
||||
; PPC-PWR8-NEXT: .cfi_offset r28, -16
|
||||
; PPC-PWR8-NEXT: .cfi_offset r29, -12
|
||||
; PPC-PWR8-NEXT: .cfi_offset r30, -8
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r26, 56(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: mr r26, r3
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: stw r27, 60(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: stw r28, 64(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r28, r6
|
||||
; PPC-PWR8-NEXT: lwz r6, 12(r3)
|
||||
; PPC-PWR8-NEXT: mr r27, r5
|
||||
; PPC-PWR8-NEXT: li r3, 16
|
||||
; PPC-PWR8-NEXT: mr r4, r26
|
||||
; PPC-PWR8-NEXT: mr r5, r25
|
||||
; PPC-PWR8-NEXT: li r6, 0
|
||||
; PPC-PWR8-NEXT: stw r24, 48(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: lwz r5, 8(r3)
|
||||
; PPC-PWR8-NEXT: lwz r4, 4(r3)
|
||||
; PPC-PWR8-NEXT: stw r25, 52(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: addi r25, r1, 32
|
||||
; PPC-PWR8-NEXT: lwz r3, 0(r3)
|
||||
; PPC-PWR8-NEXT: stw r29, 68(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: stw r30, 72(r1) # 4-byte Folded Spill
|
||||
; PPC-PWR8-NEXT: mr r30, r8
|
||||
; PPC-PWR8-NEXT: mr r29, r7
|
||||
; PPC-PWR8-NEXT: bl __atomic_load
|
||||
; PPC-PWR8-NEXT: lwz r6, 44(r1)
|
||||
; PPC-PWR8-NEXT: lwz r5, 40(r1)
|
||||
; PPC-PWR8-NEXT: addi r24, r1, 16
|
||||
; PPC-PWR8-NEXT: lwz r4, 36(r1)
|
||||
; PPC-PWR8-NEXT: lwz r3, 32(r1)
|
||||
; PPC-PWR8-NEXT: .p2align 4
|
||||
; PPC-PWR8-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; PPC-PWR8-NEXT: #
|
||||
|
||||
@@ -192,41 +192,42 @@ define void @amomax_d_discard(ptr %a, i64 %b) nounwind {
|
||||
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a2
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: lw a4, 0(a0)
|
||||
; RV32-NEXT: lw a5, 4(a0)
|
||||
; RV32-NEXT: mv s2, a1
|
||||
; RV32-NEXT: mv s1, a1
|
||||
; RV32-NEXT: mv s2, a0
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv a4, a0
|
||||
; RV32-NEXT: j .LBB11_2
|
||||
; RV32-NEXT: .LBB11_1: # %select.end
|
||||
; RV32-NEXT: # in Loop: Header=BB11_2 Depth=1
|
||||
; RV32-NEXT: sw a4, 8(sp)
|
||||
; RV32-NEXT: sw a5, 12(sp)
|
||||
; RV32-NEXT: sw a1, 12(sp)
|
||||
; RV32-NEXT: addi a1, sp, 8
|
||||
; RV32-NEXT: li a4, 5
|
||||
; RV32-NEXT: li a5, 5
|
||||
; RV32-NEXT: mv a0, s1
|
||||
; RV32-NEXT: mv a0, s2
|
||||
; RV32-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32-NEXT: lw a4, 8(sp)
|
||||
; RV32-NEXT: lw a5, 12(sp)
|
||||
; RV32-NEXT: lw a1, 12(sp)
|
||||
; RV32-NEXT: bnez a0, .LBB11_6
|
||||
; RV32-NEXT: .LBB11_2: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: beq a5, s0, .LBB11_4
|
||||
; RV32-NEXT: beq a1, s0, .LBB11_4
|
||||
; RV32-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32-NEXT: # in Loop: Header=BB11_2 Depth=1
|
||||
; RV32-NEXT: slt a0, s0, a5
|
||||
; RV32-NEXT: slt a0, s0, a1
|
||||
; RV32-NEXT: mv a2, a4
|
||||
; RV32-NEXT: mv a3, a5
|
||||
; RV32-NEXT: mv a3, a1
|
||||
; RV32-NEXT: bnez a0, .LBB11_1
|
||||
; RV32-NEXT: j .LBB11_5
|
||||
; RV32-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1
|
||||
; RV32-NEXT: sltu a0, s2, a4
|
||||
; RV32-NEXT: sltu a0, s1, a4
|
||||
; RV32-NEXT: mv a2, a4
|
||||
; RV32-NEXT: mv a3, a5
|
||||
; RV32-NEXT: mv a3, a1
|
||||
; RV32-NEXT: bnez a0, .LBB11_1
|
||||
; RV32-NEXT: .LBB11_5: # %select.false
|
||||
; RV32-NEXT: # in Loop: Header=BB11_2 Depth=1
|
||||
; RV32-NEXT: mv a2, s2
|
||||
; RV32-NEXT: mv a2, s1
|
||||
; RV32-NEXT: mv a3, s0
|
||||
; RV32-NEXT: j .LBB11_1
|
||||
; RV32-NEXT: .LBB11_6: # %atomicrmw.end
|
||||
@@ -268,41 +269,42 @@ define void @amomaxu_d_discard(ptr %a, i64 %b) nounwind {
|
||||
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a2
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: lw a4, 0(a0)
|
||||
; RV32-NEXT: lw a5, 4(a0)
|
||||
; RV32-NEXT: mv s2, a1
|
||||
; RV32-NEXT: mv s1, a1
|
||||
; RV32-NEXT: mv s2, a0
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv a4, a0
|
||||
; RV32-NEXT: j .LBB13_2
|
||||
; RV32-NEXT: .LBB13_1: # %select.end
|
||||
; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1
|
||||
; RV32-NEXT: sw a4, 8(sp)
|
||||
; RV32-NEXT: sw a5, 12(sp)
|
||||
; RV32-NEXT: sw a1, 12(sp)
|
||||
; RV32-NEXT: addi a1, sp, 8
|
||||
; RV32-NEXT: li a4, 5
|
||||
; RV32-NEXT: li a5, 5
|
||||
; RV32-NEXT: mv a0, s1
|
||||
; RV32-NEXT: mv a0, s2
|
||||
; RV32-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32-NEXT: lw a4, 8(sp)
|
||||
; RV32-NEXT: lw a5, 12(sp)
|
||||
; RV32-NEXT: lw a1, 12(sp)
|
||||
; RV32-NEXT: bnez a0, .LBB13_6
|
||||
; RV32-NEXT: .LBB13_2: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: beq a5, s0, .LBB13_4
|
||||
; RV32-NEXT: beq a1, s0, .LBB13_4
|
||||
; RV32-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1
|
||||
; RV32-NEXT: sltu a0, s0, a5
|
||||
; RV32-NEXT: sltu a0, s0, a1
|
||||
; RV32-NEXT: mv a2, a4
|
||||
; RV32-NEXT: mv a3, a5
|
||||
; RV32-NEXT: mv a3, a1
|
||||
; RV32-NEXT: bnez a0, .LBB13_1
|
||||
; RV32-NEXT: j .LBB13_5
|
||||
; RV32-NEXT: .LBB13_4: # in Loop: Header=BB13_2 Depth=1
|
||||
; RV32-NEXT: sltu a0, s2, a4
|
||||
; RV32-NEXT: sltu a0, s1, a4
|
||||
; RV32-NEXT: mv a2, a4
|
||||
; RV32-NEXT: mv a3, a5
|
||||
; RV32-NEXT: mv a3, a1
|
||||
; RV32-NEXT: bnez a0, .LBB13_1
|
||||
; RV32-NEXT: .LBB13_5: # %select.false
|
||||
; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1
|
||||
; RV32-NEXT: mv a2, s2
|
||||
; RV32-NEXT: mv a2, s1
|
||||
; RV32-NEXT: mv a3, s0
|
||||
; RV32-NEXT: j .LBB13_1
|
||||
; RV32-NEXT: .LBB13_6: # %atomicrmw.end
|
||||
@@ -344,41 +346,42 @@ define void @amomin_d_discard(ptr %a, i64 %b) nounwind {
|
||||
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a2
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: lw a4, 0(a0)
|
||||
; RV32-NEXT: lw a5, 4(a0)
|
||||
; RV32-NEXT: mv s2, a1
|
||||
; RV32-NEXT: mv s1, a1
|
||||
; RV32-NEXT: mv s2, a0
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv a4, a0
|
||||
; RV32-NEXT: j .LBB15_2
|
||||
; RV32-NEXT: .LBB15_1: # %select.end
|
||||
; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1
|
||||
; RV32-NEXT: sw a4, 8(sp)
|
||||
; RV32-NEXT: sw a5, 12(sp)
|
||||
; RV32-NEXT: sw a1, 12(sp)
|
||||
; RV32-NEXT: addi a1, sp, 8
|
||||
; RV32-NEXT: li a4, 5
|
||||
; RV32-NEXT: li a5, 5
|
||||
; RV32-NEXT: mv a0, s1
|
||||
; RV32-NEXT: mv a0, s2
|
||||
; RV32-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32-NEXT: lw a4, 8(sp)
|
||||
; RV32-NEXT: lw a5, 12(sp)
|
||||
; RV32-NEXT: lw a1, 12(sp)
|
||||
; RV32-NEXT: bnez a0, .LBB15_6
|
||||
; RV32-NEXT: .LBB15_2: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: beq a5, s0, .LBB15_4
|
||||
; RV32-NEXT: beq a1, s0, .LBB15_4
|
||||
; RV32-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1
|
||||
; RV32-NEXT: slt a0, s0, a5
|
||||
; RV32-NEXT: slt a0, s0, a1
|
||||
; RV32-NEXT: mv a2, a4
|
||||
; RV32-NEXT: mv a3, a5
|
||||
; RV32-NEXT: mv a3, a1
|
||||
; RV32-NEXT: beqz a0, .LBB15_1
|
||||
; RV32-NEXT: j .LBB15_5
|
||||
; RV32-NEXT: .LBB15_4: # in Loop: Header=BB15_2 Depth=1
|
||||
; RV32-NEXT: sltu a0, s2, a4
|
||||
; RV32-NEXT: sltu a0, s1, a4
|
||||
; RV32-NEXT: mv a2, a4
|
||||
; RV32-NEXT: mv a3, a5
|
||||
; RV32-NEXT: mv a3, a1
|
||||
; RV32-NEXT: beqz a0, .LBB15_1
|
||||
; RV32-NEXT: .LBB15_5: # %select.false
|
||||
; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1
|
||||
; RV32-NEXT: mv a2, s2
|
||||
; RV32-NEXT: mv a2, s1
|
||||
; RV32-NEXT: mv a3, s0
|
||||
; RV32-NEXT: j .LBB15_1
|
||||
; RV32-NEXT: .LBB15_6: # %atomicrmw.end
|
||||
@@ -420,41 +423,42 @@ define void @amominu_d_discard(ptr %a, i64 %b) nounwind {
|
||||
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a2
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: lw a4, 0(a0)
|
||||
; RV32-NEXT: lw a5, 4(a0)
|
||||
; RV32-NEXT: mv s2, a1
|
||||
; RV32-NEXT: mv s1, a1
|
||||
; RV32-NEXT: mv s2, a0
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv a4, a0
|
||||
; RV32-NEXT: j .LBB17_2
|
||||
; RV32-NEXT: .LBB17_1: # %select.end
|
||||
; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1
|
||||
; RV32-NEXT: sw a4, 8(sp)
|
||||
; RV32-NEXT: sw a5, 12(sp)
|
||||
; RV32-NEXT: sw a1, 12(sp)
|
||||
; RV32-NEXT: addi a1, sp, 8
|
||||
; RV32-NEXT: li a4, 5
|
||||
; RV32-NEXT: li a5, 5
|
||||
; RV32-NEXT: mv a0, s1
|
||||
; RV32-NEXT: mv a0, s2
|
||||
; RV32-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32-NEXT: lw a4, 8(sp)
|
||||
; RV32-NEXT: lw a5, 12(sp)
|
||||
; RV32-NEXT: lw a1, 12(sp)
|
||||
; RV32-NEXT: bnez a0, .LBB17_6
|
||||
; RV32-NEXT: .LBB17_2: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: beq a5, s0, .LBB17_4
|
||||
; RV32-NEXT: beq a1, s0, .LBB17_4
|
||||
; RV32-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1
|
||||
; RV32-NEXT: sltu a0, s0, a5
|
||||
; RV32-NEXT: sltu a0, s0, a1
|
||||
; RV32-NEXT: mv a2, a4
|
||||
; RV32-NEXT: mv a3, a5
|
||||
; RV32-NEXT: mv a3, a1
|
||||
; RV32-NEXT: beqz a0, .LBB17_1
|
||||
; RV32-NEXT: j .LBB17_5
|
||||
; RV32-NEXT: .LBB17_4: # in Loop: Header=BB17_2 Depth=1
|
||||
; RV32-NEXT: sltu a0, s2, a4
|
||||
; RV32-NEXT: sltu a0, s1, a4
|
||||
; RV32-NEXT: mv a2, a4
|
||||
; RV32-NEXT: mv a3, a5
|
||||
; RV32-NEXT: mv a3, a1
|
||||
; RV32-NEXT: beqz a0, .LBB17_1
|
||||
; RV32-NEXT: .LBB17_5: # %select.false
|
||||
; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1
|
||||
; RV32-NEXT: mv a2, s2
|
||||
; RV32-NEXT: mv a2, s1
|
||||
; RV32-NEXT: mv a3, s0
|
||||
; RV32-NEXT: j .LBB17_1
|
||||
; RV32-NEXT: .LBB17_6: # %atomicrmw.end
|
||||
|
||||
@@ -294,45 +294,45 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
|
||||
; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32IB-COMMON-NEXT: mv s0, a2
|
||||
; RV32IB-COMMON-NEXT: mv s1, a0
|
||||
; RV32IB-COMMON-NEXT: lw a4, 0(a0)
|
||||
; RV32IB-COMMON-NEXT: lw a5, 4(a0)
|
||||
; RV32IB-COMMON-NEXT: mv s2, a1
|
||||
; RV32IB-COMMON-NEXT: mv s1, a1
|
||||
; RV32IB-COMMON-NEXT: mv s2, a0
|
||||
; RV32IB-COMMON-NEXT: li a1, 0
|
||||
; RV32IB-COMMON-NEXT: call __atomic_load_8
|
||||
; RV32IB-COMMON-NEXT: mv a4, a0
|
||||
; RV32IB-COMMON-NEXT: j .LBB4_2
|
||||
; RV32IB-COMMON-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sw a4, 8(sp)
|
||||
; RV32IB-COMMON-NEXT: sw a5, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: sw a1, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: addi a1, sp, 8
|
||||
; RV32IB-COMMON-NEXT: li a4, 5
|
||||
; RV32IB-COMMON-NEXT: li a5, 5
|
||||
; RV32IB-COMMON-NEXT: mv a0, s1
|
||||
; RV32IB-COMMON-NEXT: mv a0, s2
|
||||
; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32IB-COMMON-NEXT: lw a4, 8(sp)
|
||||
; RV32IB-COMMON-NEXT: lw a5, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: lw a1, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: bnez a0, .LBB4_7
|
||||
; RV32IB-COMMON-NEXT: .LBB4_2: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32IB-COMMON-NEXT: beq a5, s0, .LBB4_4
|
||||
; RV32IB-COMMON-NEXT: beq a1, s0, .LBB4_4
|
||||
; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: slt a0, s0, a5
|
||||
; RV32IB-COMMON-NEXT: slt a0, s0, a1
|
||||
; RV32IB-COMMON-NEXT: j .LBB4_5
|
||||
; RV32IB-COMMON-NEXT: .LBB4_4: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sltu a0, s2, a4
|
||||
; RV32IB-COMMON-NEXT: sltu a0, s1, a4
|
||||
; RV32IB-COMMON-NEXT: .LBB4_5: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: mv a2, a4
|
||||
; RV32IB-COMMON-NEXT: mv a3, a5
|
||||
; RV32IB-COMMON-NEXT: mv a3, a1
|
||||
; RV32IB-COMMON-NEXT: bnez a0, .LBB4_1
|
||||
; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: mv a2, s2
|
||||
; RV32IB-COMMON-NEXT: mv a2, s1
|
||||
; RV32IB-COMMON-NEXT: mv a3, s0
|
||||
; RV32IB-COMMON-NEXT: j .LBB4_1
|
||||
; RV32IB-COMMON-NEXT: .LBB4_7: # %atomicrmw.end
|
||||
; RV32IB-COMMON-NEXT: mv a0, a4
|
||||
; RV32IB-COMMON-NEXT: mv a1, a5
|
||||
; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -383,45 +383,45 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
|
||||
; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32IB-COMMON-NEXT: mv s0, a2
|
||||
; RV32IB-COMMON-NEXT: mv s1, a0
|
||||
; RV32IB-COMMON-NEXT: lw a4, 0(a0)
|
||||
; RV32IB-COMMON-NEXT: lw a5, 4(a0)
|
||||
; RV32IB-COMMON-NEXT: mv s2, a1
|
||||
; RV32IB-COMMON-NEXT: mv s1, a1
|
||||
; RV32IB-COMMON-NEXT: mv s2, a0
|
||||
; RV32IB-COMMON-NEXT: li a1, 0
|
||||
; RV32IB-COMMON-NEXT: call __atomic_load_8
|
||||
; RV32IB-COMMON-NEXT: mv a4, a0
|
||||
; RV32IB-COMMON-NEXT: j .LBB5_2
|
||||
; RV32IB-COMMON-NEXT: .LBB5_1: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sw a4, 8(sp)
|
||||
; RV32IB-COMMON-NEXT: sw a5, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: sw a1, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: addi a1, sp, 8
|
||||
; RV32IB-COMMON-NEXT: li a4, 5
|
||||
; RV32IB-COMMON-NEXT: li a5, 5
|
||||
; RV32IB-COMMON-NEXT: mv a0, s1
|
||||
; RV32IB-COMMON-NEXT: mv a0, s2
|
||||
; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32IB-COMMON-NEXT: lw a4, 8(sp)
|
||||
; RV32IB-COMMON-NEXT: lw a5, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: lw a1, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: bnez a0, .LBB5_7
|
||||
; RV32IB-COMMON-NEXT: .LBB5_2: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32IB-COMMON-NEXT: beq a5, s0, .LBB5_4
|
||||
; RV32IB-COMMON-NEXT: beq a1, s0, .LBB5_4
|
||||
; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: slt a0, a5, s0
|
||||
; RV32IB-COMMON-NEXT: slt a0, a1, s0
|
||||
; RV32IB-COMMON-NEXT: j .LBB5_5
|
||||
; RV32IB-COMMON-NEXT: .LBB5_4: # in Loop: Header=BB5_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sltu a0, a4, s2
|
||||
; RV32IB-COMMON-NEXT: sltu a0, a4, s1
|
||||
; RV32IB-COMMON-NEXT: .LBB5_5: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: mv a2, a4
|
||||
; RV32IB-COMMON-NEXT: mv a3, a5
|
||||
; RV32IB-COMMON-NEXT: mv a3, a1
|
||||
; RV32IB-COMMON-NEXT: bnez a0, .LBB5_1
|
||||
; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB5_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: mv a2, s2
|
||||
; RV32IB-COMMON-NEXT: mv a2, s1
|
||||
; RV32IB-COMMON-NEXT: mv a3, s0
|
||||
; RV32IB-COMMON-NEXT: j .LBB5_1
|
||||
; RV32IB-COMMON-NEXT: .LBB5_7: # %atomicrmw.end
|
||||
; RV32IB-COMMON-NEXT: mv a0, a4
|
||||
; RV32IB-COMMON-NEXT: mv a1, a5
|
||||
; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -472,45 +472,45 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
|
||||
; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32IB-COMMON-NEXT: mv s0, a2
|
||||
; RV32IB-COMMON-NEXT: mv s1, a0
|
||||
; RV32IB-COMMON-NEXT: lw a4, 0(a0)
|
||||
; RV32IB-COMMON-NEXT: lw a5, 4(a0)
|
||||
; RV32IB-COMMON-NEXT: mv s2, a1
|
||||
; RV32IB-COMMON-NEXT: mv s1, a1
|
||||
; RV32IB-COMMON-NEXT: mv s2, a0
|
||||
; RV32IB-COMMON-NEXT: li a1, 0
|
||||
; RV32IB-COMMON-NEXT: call __atomic_load_8
|
||||
; RV32IB-COMMON-NEXT: mv a4, a0
|
||||
; RV32IB-COMMON-NEXT: j .LBB6_2
|
||||
; RV32IB-COMMON-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sw a4, 8(sp)
|
||||
; RV32IB-COMMON-NEXT: sw a5, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: sw a1, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: addi a1, sp, 8
|
||||
; RV32IB-COMMON-NEXT: li a4, 5
|
||||
; RV32IB-COMMON-NEXT: li a5, 5
|
||||
; RV32IB-COMMON-NEXT: mv a0, s1
|
||||
; RV32IB-COMMON-NEXT: mv a0, s2
|
||||
; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32IB-COMMON-NEXT: lw a4, 8(sp)
|
||||
; RV32IB-COMMON-NEXT: lw a5, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: lw a1, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: bnez a0, .LBB6_7
|
||||
; RV32IB-COMMON-NEXT: .LBB6_2: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32IB-COMMON-NEXT: beq a5, s0, .LBB6_4
|
||||
; RV32IB-COMMON-NEXT: beq a1, s0, .LBB6_4
|
||||
; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sltu a0, s0, a5
|
||||
; RV32IB-COMMON-NEXT: sltu a0, s0, a1
|
||||
; RV32IB-COMMON-NEXT: j .LBB6_5
|
||||
; RV32IB-COMMON-NEXT: .LBB6_4: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sltu a0, s2, a4
|
||||
; RV32IB-COMMON-NEXT: sltu a0, s1, a4
|
||||
; RV32IB-COMMON-NEXT: .LBB6_5: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: mv a2, a4
|
||||
; RV32IB-COMMON-NEXT: mv a3, a5
|
||||
; RV32IB-COMMON-NEXT: mv a3, a1
|
||||
; RV32IB-COMMON-NEXT: bnez a0, .LBB6_1
|
||||
; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: mv a2, s2
|
||||
; RV32IB-COMMON-NEXT: mv a2, s1
|
||||
; RV32IB-COMMON-NEXT: mv a3, s0
|
||||
; RV32IB-COMMON-NEXT: j .LBB6_1
|
||||
; RV32IB-COMMON-NEXT: .LBB6_7: # %atomicrmw.end
|
||||
; RV32IB-COMMON-NEXT: mv a0, a4
|
||||
; RV32IB-COMMON-NEXT: mv a1, a5
|
||||
; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -561,45 +561,45 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
|
||||
; RV32IB-COMMON-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32IB-COMMON-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32IB-COMMON-NEXT: mv s0, a2
|
||||
; RV32IB-COMMON-NEXT: mv s1, a0
|
||||
; RV32IB-COMMON-NEXT: lw a4, 0(a0)
|
||||
; RV32IB-COMMON-NEXT: lw a5, 4(a0)
|
||||
; RV32IB-COMMON-NEXT: mv s2, a1
|
||||
; RV32IB-COMMON-NEXT: mv s1, a1
|
||||
; RV32IB-COMMON-NEXT: mv s2, a0
|
||||
; RV32IB-COMMON-NEXT: li a1, 0
|
||||
; RV32IB-COMMON-NEXT: call __atomic_load_8
|
||||
; RV32IB-COMMON-NEXT: mv a4, a0
|
||||
; RV32IB-COMMON-NEXT: j .LBB7_2
|
||||
; RV32IB-COMMON-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sw a4, 8(sp)
|
||||
; RV32IB-COMMON-NEXT: sw a5, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: sw a1, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: addi a1, sp, 8
|
||||
; RV32IB-COMMON-NEXT: li a4, 5
|
||||
; RV32IB-COMMON-NEXT: li a5, 5
|
||||
; RV32IB-COMMON-NEXT: mv a0, s1
|
||||
; RV32IB-COMMON-NEXT: mv a0, s2
|
||||
; RV32IB-COMMON-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32IB-COMMON-NEXT: lw a4, 8(sp)
|
||||
; RV32IB-COMMON-NEXT: lw a5, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: lw a1, 12(sp)
|
||||
; RV32IB-COMMON-NEXT: bnez a0, .LBB7_7
|
||||
; RV32IB-COMMON-NEXT: .LBB7_2: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32IB-COMMON-NEXT: beq a5, s0, .LBB7_4
|
||||
; RV32IB-COMMON-NEXT: beq a1, s0, .LBB7_4
|
||||
; RV32IB-COMMON-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sltu a0, a5, s0
|
||||
; RV32IB-COMMON-NEXT: sltu a0, a1, s0
|
||||
; RV32IB-COMMON-NEXT: j .LBB7_5
|
||||
; RV32IB-COMMON-NEXT: .LBB7_4: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: sltu a0, a4, s2
|
||||
; RV32IB-COMMON-NEXT: sltu a0, a4, s1
|
||||
; RV32IB-COMMON-NEXT: .LBB7_5: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: mv a2, a4
|
||||
; RV32IB-COMMON-NEXT: mv a3, a5
|
||||
; RV32IB-COMMON-NEXT: mv a3, a1
|
||||
; RV32IB-COMMON-NEXT: bnez a0, .LBB7_1
|
||||
; RV32IB-COMMON-NEXT: # %bb.6: # %atomicrmw.start
|
||||
; RV32IB-COMMON-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IB-COMMON-NEXT: mv a2, s2
|
||||
; RV32IB-COMMON-NEXT: mv a2, s1
|
||||
; RV32IB-COMMON-NEXT: mv a3, s0
|
||||
; RV32IB-COMMON-NEXT: j .LBB7_1
|
||||
; RV32IB-COMMON-NEXT: .LBB7_7: # %atomicrmw.end
|
||||
; RV32IB-COMMON-NEXT: mv a0, a4
|
||||
; RV32IB-COMMON-NEXT: mv a1, a5
|
||||
; RV32IB-COMMON-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32IB-COMMON-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32IB-COMMON-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -26,27 +26,29 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: .cfi_offset s2, -16
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lbu a3, 0(a0)
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: zext.b s2, a1
|
||||
; RV32I-NEXT: mv s0, a1
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_1
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: zext.b s2, s0
|
||||
; RV32I-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: zext.b a0, a3
|
||||
; RV32I-NEXT: zext.b a0, a1
|
||||
; RV32I-NEXT: sltu a0, a0, s2
|
||||
; RV32I-NEXT: addi a0, a0, -1
|
||||
; RV32I-NEXT: and a0, a0, s1
|
||||
; RV32I-NEXT: sub a2, a3, a0
|
||||
; RV32I-NEXT: sb a3, 15(sp)
|
||||
; RV32I-NEXT: and a0, a0, s0
|
||||
; RV32I-NEXT: sub a2, a1, a0
|
||||
; RV32I-NEXT: sb a1, 15(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 15
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_1
|
||||
; RV32I-NEXT: lbu a3, 15(sp)
|
||||
; RV32I-NEXT: lbu a1, 15(sp)
|
||||
; RV32I-NEXT: beqz a0, .LBB0_1
|
||||
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -109,27 +111,29 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: .cfi_offset s2, -32
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lbu a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: zext.b s2, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_1
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: zext.b s2, s0
|
||||
; RV64I-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: zext.b a0, a3
|
||||
; RV64I-NEXT: zext.b a0, a1
|
||||
; RV64I-NEXT: sltu a0, a0, s2
|
||||
; RV64I-NEXT: addi a0, a0, -1
|
||||
; RV64I-NEXT: and a0, a0, s1
|
||||
; RV64I-NEXT: sub a2, a3, a0
|
||||
; RV64I-NEXT: sb a3, 15(sp)
|
||||
; RV64I-NEXT: and a0, a0, s0
|
||||
; RV64I-NEXT: sub a2, a1, a0
|
||||
; RV64I-NEXT: sb a1, 15(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 15
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_1
|
||||
; RV64I-NEXT: lbu a3, 15(sp)
|
||||
; RV64I-NEXT: lbu a1, 15(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB0_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
|
||||
@@ -200,7 +204,9 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s3, -20
|
||||
; RV32I-NEXT: mv s0, a1
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: lhu a1, 0(a0)
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_2
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: lui s2, 16
|
||||
; RV32I-NEXT: addi s2, s2, -1
|
||||
; RV32I-NEXT: and s3, s0, s2
|
||||
@@ -290,7 +296,9 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
|
||||
; RV64I-NEXT: .cfi_offset s3, -40
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: lhu a1, 0(a0)
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_2
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: lui s2, 16
|
||||
; RV64I-NEXT: addi s2, s2, -1
|
||||
; RV64I-NEXT: and s3, s0, s2
|
||||
@@ -378,25 +386,27 @@ define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
|
||||
; RV32I-NEXT: .cfi_offset ra, -4
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lw a3, 0(a0)
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s0, a1
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_4
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: sltu a0, a3, s1
|
||||
; RV32I-NEXT: sltu a0, a1, s0
|
||||
; RV32I-NEXT: addi a0, a0, -1
|
||||
; RV32I-NEXT: and a0, a0, s1
|
||||
; RV32I-NEXT: sub a2, a3, a0
|
||||
; RV32I-NEXT: sw a3, 0(sp)
|
||||
; RV32I-NEXT: and a0, a0, s0
|
||||
; RV32I-NEXT: sub a2, a1, a0
|
||||
; RV32I-NEXT: sw a1, 0(sp)
|
||||
; RV32I-NEXT: mv a1, sp
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_4
|
||||
; RV32I-NEXT: lw a3, 0(sp)
|
||||
; RV32I-NEXT: lw a1, 0(sp)
|
||||
; RV32I-NEXT: beqz a0, .LBB2_1
|
||||
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
|
||||
@@ -444,26 +454,29 @@ define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: .cfi_offset s2, -32
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lw a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: sext.w s2, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_4
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: sext.w s2, s0
|
||||
; RV64I-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: sltu a0, a3, s2
|
||||
; RV64I-NEXT: sext.w a0, a1
|
||||
; RV64I-NEXT: sltu a0, a0, s2
|
||||
; RV64I-NEXT: addi a0, a0, -1
|
||||
; RV64I-NEXT: and a0, a0, s1
|
||||
; RV64I-NEXT: subw a2, a3, a0
|
||||
; RV64I-NEXT: sw a3, 12(sp)
|
||||
; RV64I-NEXT: and a0, a0, s0
|
||||
; RV64I-NEXT: subw a2, a1, a0
|
||||
; RV64I-NEXT: sw a1, 12(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 12
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_4
|
||||
; RV64I-NEXT: lw a3, 12(sp)
|
||||
; RV64I-NEXT: lw a1, 12(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB2_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
|
||||
@@ -519,43 +532,43 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: .cfi_offset s2, -16
|
||||
; RV32I-NEXT: mv s0, a2
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: lw a4, 0(a0)
|
||||
; RV32I-NEXT: lw a5, 4(a0)
|
||||
; RV32I-NEXT: mv s2, a1
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s2, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_8
|
||||
; RV32I-NEXT: mv a4, a0
|
||||
; RV32I-NEXT: j .LBB3_3
|
||||
; RV32I-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32I-NEXT: sltu a0, a5, s0
|
||||
; RV32I-NEXT: sltu a0, a1, s0
|
||||
; RV32I-NEXT: .LBB3_2: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32I-NEXT: xori a0, a0, 1
|
||||
; RV32I-NEXT: neg a0, a0
|
||||
; RV32I-NEXT: and a1, a0, s2
|
||||
; RV32I-NEXT: and a2, a0, s1
|
||||
; RV32I-NEXT: and a0, a0, s0
|
||||
; RV32I-NEXT: sltu a3, a4, a1
|
||||
; RV32I-NEXT: sub a0, a5, a0
|
||||
; RV32I-NEXT: sub a2, a4, a1
|
||||
; RV32I-NEXT: sltu a3, a4, a2
|
||||
; RV32I-NEXT: sub a0, a1, a0
|
||||
; RV32I-NEXT: sub a2, a4, a2
|
||||
; RV32I-NEXT: sub a3, a0, a3
|
||||
; RV32I-NEXT: sw a4, 8(sp)
|
||||
; RV32I-NEXT: sw a5, 12(sp)
|
||||
; RV32I-NEXT: sw a1, 12(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 8
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: li a5, 5
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: mv a0, s2
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32I-NEXT: lw a4, 8(sp)
|
||||
; RV32I-NEXT: lw a5, 12(sp)
|
||||
; RV32I-NEXT: lw a1, 12(sp)
|
||||
; RV32I-NEXT: bnez a0, .LBB3_5
|
||||
; RV32I-NEXT: .LBB3_3: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: bne a5, s0, .LBB3_1
|
||||
; RV32I-NEXT: bne a1, s0, .LBB3_1
|
||||
; RV32I-NEXT: # %bb.4: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32I-NEXT: sltu a0, a4, s2
|
||||
; RV32I-NEXT: sltu a0, a4, s1
|
||||
; RV32I-NEXT: j .LBB3_2
|
||||
; RV32I-NEXT: .LBB3_5: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a4
|
||||
; RV32I-NEXT: mv a1, a5
|
||||
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -581,43 +594,43 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
|
||||
; RV32IA-NEXT: .cfi_offset s1, -12
|
||||
; RV32IA-NEXT: .cfi_offset s2, -16
|
||||
; RV32IA-NEXT: mv s0, a2
|
||||
; RV32IA-NEXT: mv s1, a0
|
||||
; RV32IA-NEXT: lw a4, 0(a0)
|
||||
; RV32IA-NEXT: lw a5, 4(a0)
|
||||
; RV32IA-NEXT: mv s2, a1
|
||||
; RV32IA-NEXT: mv s1, a1
|
||||
; RV32IA-NEXT: mv s2, a0
|
||||
; RV32IA-NEXT: li a1, 0
|
||||
; RV32IA-NEXT: call __atomic_load_8
|
||||
; RV32IA-NEXT: mv a4, a0
|
||||
; RV32IA-NEXT: j .LBB3_3
|
||||
; RV32IA-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32IA-NEXT: sltu a0, a5, s0
|
||||
; RV32IA-NEXT: sltu a0, a1, s0
|
||||
; RV32IA-NEXT: .LBB3_2: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32IA-NEXT: xori a0, a0, 1
|
||||
; RV32IA-NEXT: neg a0, a0
|
||||
; RV32IA-NEXT: and a1, a0, s2
|
||||
; RV32IA-NEXT: and a2, a0, s1
|
||||
; RV32IA-NEXT: and a0, a0, s0
|
||||
; RV32IA-NEXT: sltu a3, a4, a1
|
||||
; RV32IA-NEXT: sub a0, a5, a0
|
||||
; RV32IA-NEXT: sub a2, a4, a1
|
||||
; RV32IA-NEXT: sltu a3, a4, a2
|
||||
; RV32IA-NEXT: sub a0, a1, a0
|
||||
; RV32IA-NEXT: sub a2, a4, a2
|
||||
; RV32IA-NEXT: sub a3, a0, a3
|
||||
; RV32IA-NEXT: sw a4, 8(sp)
|
||||
; RV32IA-NEXT: sw a5, 12(sp)
|
||||
; RV32IA-NEXT: sw a1, 12(sp)
|
||||
; RV32IA-NEXT: addi a1, sp, 8
|
||||
; RV32IA-NEXT: li a4, 5
|
||||
; RV32IA-NEXT: li a5, 5
|
||||
; RV32IA-NEXT: mv a0, s1
|
||||
; RV32IA-NEXT: mv a0, s2
|
||||
; RV32IA-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32IA-NEXT: lw a4, 8(sp)
|
||||
; RV32IA-NEXT: lw a5, 12(sp)
|
||||
; RV32IA-NEXT: lw a1, 12(sp)
|
||||
; RV32IA-NEXT: bnez a0, .LBB3_5
|
||||
; RV32IA-NEXT: .LBB3_3: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32IA-NEXT: bne a5, s0, .LBB3_1
|
||||
; RV32IA-NEXT: bne a1, s0, .LBB3_1
|
||||
; RV32IA-NEXT: # %bb.4: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32IA-NEXT: sltu a0, a4, s2
|
||||
; RV32IA-NEXT: sltu a0, a4, s1
|
||||
; RV32IA-NEXT: j .LBB3_2
|
||||
; RV32IA-NEXT: .LBB3_5: # %atomicrmw.end
|
||||
; RV32IA-NEXT: mv a0, a4
|
||||
; RV32IA-NEXT: mv a1, a5
|
||||
; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -640,25 +653,27 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
|
||||
; RV64I-NEXT: .cfi_offset ra, -8
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: ld a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_8
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: sltu a0, a3, s1
|
||||
; RV64I-NEXT: sltu a0, a1, s0
|
||||
; RV64I-NEXT: addi a0, a0, -1
|
||||
; RV64I-NEXT: and a0, a0, s1
|
||||
; RV64I-NEXT: sub a2, a3, a0
|
||||
; RV64I-NEXT: sd a3, 0(sp)
|
||||
; RV64I-NEXT: and a0, a0, s0
|
||||
; RV64I-NEXT: sub a2, a1, a0
|
||||
; RV64I-NEXT: sd a1, 0(sp)
|
||||
; RV64I-NEXT: mv a1, sp
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_8
|
||||
; RV64I-NEXT: ld a3, 0(sp)
|
||||
; RV64I-NEXT: ld a1, 0(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB3_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
||||
@@ -708,26 +723,29 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
|
||||
; RV32I-NEXT: .cfi_offset ra, -4
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lbu a3, 0(a0)
|
||||
; RV32I-NEXT: zext.b s1, a1
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_1
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: zext.b s1, s1
|
||||
; RV32I-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: zext.b a0, a3
|
||||
; RV32I-NEXT: sub a1, a0, s1
|
||||
; RV32I-NEXT: sltu a0, a0, a1
|
||||
; RV32I-NEXT: zext.b a0, a1
|
||||
; RV32I-NEXT: sub a2, a0, s1
|
||||
; RV32I-NEXT: sltu a0, a0, a2
|
||||
; RV32I-NEXT: addi a0, a0, -1
|
||||
; RV32I-NEXT: and a2, a0, a1
|
||||
; RV32I-NEXT: sb a3, 3(sp)
|
||||
; RV32I-NEXT: and a2, a0, a2
|
||||
; RV32I-NEXT: sb a1, 3(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 3
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_1
|
||||
; RV32I-NEXT: lbu a3, 3(sp)
|
||||
; RV32I-NEXT: lbu a1, 3(sp)
|
||||
; RV32I-NEXT: beqz a0, .LBB4_1
|
||||
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
|
||||
@@ -785,26 +803,29 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
|
||||
; RV64I-NEXT: .cfi_offset ra, -8
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lbu a3, 0(a0)
|
||||
; RV64I-NEXT: zext.b s1, a1
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_1
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: zext.b s1, s1
|
||||
; RV64I-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: zext.b a0, a3
|
||||
; RV64I-NEXT: sub a1, a0, s1
|
||||
; RV64I-NEXT: sltu a0, a0, a1
|
||||
; RV64I-NEXT: zext.b a0, a1
|
||||
; RV64I-NEXT: sub a2, a0, s1
|
||||
; RV64I-NEXT: sltu a0, a0, a2
|
||||
; RV64I-NEXT: addi a0, a0, -1
|
||||
; RV64I-NEXT: and a2, a0, a1
|
||||
; RV64I-NEXT: sb a3, 7(sp)
|
||||
; RV64I-NEXT: and a2, a0, a2
|
||||
; RV64I-NEXT: sb a1, 7(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 7
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_1
|
||||
; RV64I-NEXT: lbu a3, 7(sp)
|
||||
; RV64I-NEXT: lbu a1, 7(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB4_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
||||
@@ -868,28 +889,31 @@ define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: .cfi_offset s2, -16
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lhu a3, 0(a0)
|
||||
; RV32I-NEXT: lui s1, 16
|
||||
; RV32I-NEXT: addi s1, s1, -1
|
||||
; RV32I-NEXT: and s2, a1, s1
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_2
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: lui s2, 16
|
||||
; RV32I-NEXT: addi s2, s2, -1
|
||||
; RV32I-NEXT: and s1, s1, s2
|
||||
; RV32I-NEXT: .LBB5_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: and a0, a3, s1
|
||||
; RV32I-NEXT: sub a1, a0, s2
|
||||
; RV32I-NEXT: sltu a0, a0, a1
|
||||
; RV32I-NEXT: and a0, a1, s2
|
||||
; RV32I-NEXT: sub a2, a0, s1
|
||||
; RV32I-NEXT: sltu a0, a0, a2
|
||||
; RV32I-NEXT: addi a0, a0, -1
|
||||
; RV32I-NEXT: and a2, a0, a1
|
||||
; RV32I-NEXT: sh a3, 14(sp)
|
||||
; RV32I-NEXT: and a2, a0, a2
|
||||
; RV32I-NEXT: sh a1, 14(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 14
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_2
|
||||
; RV32I-NEXT: lh a3, 14(sp)
|
||||
; RV32I-NEXT: lh a1, 14(sp)
|
||||
; RV32I-NEXT: beqz a0, .LBB5_1
|
||||
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -952,28 +976,31 @@ define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: .cfi_offset s2, -32
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lhu a3, 0(a0)
|
||||
; RV64I-NEXT: lui s1, 16
|
||||
; RV64I-NEXT: addi s1, s1, -1
|
||||
; RV64I-NEXT: and s2, a1, s1
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_2
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: lui s2, 16
|
||||
; RV64I-NEXT: addi s2, s2, -1
|
||||
; RV64I-NEXT: and s1, s1, s2
|
||||
; RV64I-NEXT: .LBB5_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: and a0, a3, s1
|
||||
; RV64I-NEXT: sub a1, a0, s2
|
||||
; RV64I-NEXT: sltu a0, a0, a1
|
||||
; RV64I-NEXT: and a0, a1, s2
|
||||
; RV64I-NEXT: sub a2, a0, s1
|
||||
; RV64I-NEXT: sltu a0, a0, a2
|
||||
; RV64I-NEXT: addi a0, a0, -1
|
||||
; RV64I-NEXT: and a2, a0, a1
|
||||
; RV64I-NEXT: sh a3, 14(sp)
|
||||
; RV64I-NEXT: and a2, a0, a2
|
||||
; RV64I-NEXT: sh a1, 14(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 14
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_2
|
||||
; RV64I-NEXT: lh a3, 14(sp)
|
||||
; RV64I-NEXT: lh a1, 14(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB5_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
|
||||
@@ -1038,25 +1065,27 @@ define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
|
||||
; RV32I-NEXT: .cfi_offset ra, -4
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lw a3, 0(a0)
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s0, a1
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_4
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: sub a0, a3, s1
|
||||
; RV32I-NEXT: sltu a1, a3, a0
|
||||
; RV32I-NEXT: addi a1, a1, -1
|
||||
; RV32I-NEXT: and a2, a1, a0
|
||||
; RV32I-NEXT: sw a3, 0(sp)
|
||||
; RV32I-NEXT: sub a0, a1, s0
|
||||
; RV32I-NEXT: sltu a2, a1, a0
|
||||
; RV32I-NEXT: addi a2, a2, -1
|
||||
; RV32I-NEXT: and a2, a2, a0
|
||||
; RV32I-NEXT: sw a1, 0(sp)
|
||||
; RV32I-NEXT: mv a1, sp
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_4
|
||||
; RV32I-NEXT: lw a3, 0(sp)
|
||||
; RV32I-NEXT: lw a1, 0(sp)
|
||||
; RV32I-NEXT: beqz a0, .LBB6_1
|
||||
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
|
||||
@@ -1102,25 +1131,28 @@ define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
|
||||
; RV64I-NEXT: .cfi_offset ra, -8
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lw a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_4
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: subw a0, a3, s1
|
||||
; RV64I-NEXT: sltu a1, a3, a0
|
||||
; RV64I-NEXT: addi a1, a1, -1
|
||||
; RV64I-NEXT: and a2, a1, a0
|
||||
; RV64I-NEXT: sw a3, 4(sp)
|
||||
; RV64I-NEXT: subw a0, a1, s0
|
||||
; RV64I-NEXT: sext.w a2, a1
|
||||
; RV64I-NEXT: sltu a2, a2, a0
|
||||
; RV64I-NEXT: addi a2, a2, -1
|
||||
; RV64I-NEXT: and a2, a2, a0
|
||||
; RV64I-NEXT: sw a1, 4(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 4
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_4
|
||||
; RV64I-NEXT: lw a3, 4(sp)
|
||||
; RV64I-NEXT: lw a1, 4(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB6_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
||||
@@ -1173,42 +1205,42 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: .cfi_offset s2, -16
|
||||
; RV32I-NEXT: mv s0, a2
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: lw a4, 0(a0)
|
||||
; RV32I-NEXT: lw a5, 4(a0)
|
||||
; RV32I-NEXT: mv s2, a1
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s2, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_8
|
||||
; RV32I-NEXT: mv a4, a0
|
||||
; RV32I-NEXT: j .LBB7_3
|
||||
; RV32I-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB7_3 Depth=1
|
||||
; RV32I-NEXT: sltu a2, a5, a0
|
||||
; RV32I-NEXT: sltu a3, a1, a0
|
||||
; RV32I-NEXT: .LBB7_2: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB7_3 Depth=1
|
||||
; RV32I-NEXT: addi a3, a2, -1
|
||||
; RV32I-NEXT: and a2, a3, a1
|
||||
; RV32I-NEXT: addi a3, a3, -1
|
||||
; RV32I-NEXT: and a2, a3, a2
|
||||
; RV32I-NEXT: and a3, a3, a0
|
||||
; RV32I-NEXT: sw a4, 8(sp)
|
||||
; RV32I-NEXT: sw a5, 12(sp)
|
||||
; RV32I-NEXT: sw a1, 12(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 8
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: li a5, 5
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: mv a0, s2
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32I-NEXT: lw a4, 8(sp)
|
||||
; RV32I-NEXT: lw a5, 12(sp)
|
||||
; RV32I-NEXT: lw a1, 12(sp)
|
||||
; RV32I-NEXT: bnez a0, .LBB7_5
|
||||
; RV32I-NEXT: .LBB7_3: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: sltu a0, a4, s2
|
||||
; RV32I-NEXT: sub a1, a5, s0
|
||||
; RV32I-NEXT: sub a0, a1, a0
|
||||
; RV32I-NEXT: sub a1, a4, s2
|
||||
; RV32I-NEXT: bne a0, a5, .LBB7_1
|
||||
; RV32I-NEXT: sltu a0, a4, s1
|
||||
; RV32I-NEXT: sub a2, a1, s0
|
||||
; RV32I-NEXT: sub a0, a2, a0
|
||||
; RV32I-NEXT: sub a2, a4, s1
|
||||
; RV32I-NEXT: bne a0, a1, .LBB7_1
|
||||
; RV32I-NEXT: # %bb.4: # in Loop: Header=BB7_3 Depth=1
|
||||
; RV32I-NEXT: sltu a2, a4, a1
|
||||
; RV32I-NEXT: sltu a3, a4, a2
|
||||
; RV32I-NEXT: j .LBB7_2
|
||||
; RV32I-NEXT: .LBB7_5: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a4
|
||||
; RV32I-NEXT: mv a1, a5
|
||||
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -1234,42 +1266,42 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
|
||||
; RV32IA-NEXT: .cfi_offset s1, -12
|
||||
; RV32IA-NEXT: .cfi_offset s2, -16
|
||||
; RV32IA-NEXT: mv s0, a2
|
||||
; RV32IA-NEXT: mv s1, a0
|
||||
; RV32IA-NEXT: lw a4, 0(a0)
|
||||
; RV32IA-NEXT: lw a5, 4(a0)
|
||||
; RV32IA-NEXT: mv s2, a1
|
||||
; RV32IA-NEXT: mv s1, a1
|
||||
; RV32IA-NEXT: mv s2, a0
|
||||
; RV32IA-NEXT: li a1, 0
|
||||
; RV32IA-NEXT: call __atomic_load_8
|
||||
; RV32IA-NEXT: mv a4, a0
|
||||
; RV32IA-NEXT: j .LBB7_3
|
||||
; RV32IA-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB7_3 Depth=1
|
||||
; RV32IA-NEXT: sltu a2, a5, a0
|
||||
; RV32IA-NEXT: sltu a3, a1, a0
|
||||
; RV32IA-NEXT: .LBB7_2: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB7_3 Depth=1
|
||||
; RV32IA-NEXT: addi a3, a2, -1
|
||||
; RV32IA-NEXT: and a2, a3, a1
|
||||
; RV32IA-NEXT: addi a3, a3, -1
|
||||
; RV32IA-NEXT: and a2, a3, a2
|
||||
; RV32IA-NEXT: and a3, a3, a0
|
||||
; RV32IA-NEXT: sw a4, 8(sp)
|
||||
; RV32IA-NEXT: sw a5, 12(sp)
|
||||
; RV32IA-NEXT: sw a1, 12(sp)
|
||||
; RV32IA-NEXT: addi a1, sp, 8
|
||||
; RV32IA-NEXT: li a4, 5
|
||||
; RV32IA-NEXT: li a5, 5
|
||||
; RV32IA-NEXT: mv a0, s1
|
||||
; RV32IA-NEXT: mv a0, s2
|
||||
; RV32IA-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32IA-NEXT: lw a4, 8(sp)
|
||||
; RV32IA-NEXT: lw a5, 12(sp)
|
||||
; RV32IA-NEXT: lw a1, 12(sp)
|
||||
; RV32IA-NEXT: bnez a0, .LBB7_5
|
||||
; RV32IA-NEXT: .LBB7_3: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32IA-NEXT: sltu a0, a4, s2
|
||||
; RV32IA-NEXT: sub a1, a5, s0
|
||||
; RV32IA-NEXT: sub a0, a1, a0
|
||||
; RV32IA-NEXT: sub a1, a4, s2
|
||||
; RV32IA-NEXT: bne a0, a5, .LBB7_1
|
||||
; RV32IA-NEXT: sltu a0, a4, s1
|
||||
; RV32IA-NEXT: sub a2, a1, s0
|
||||
; RV32IA-NEXT: sub a0, a2, a0
|
||||
; RV32IA-NEXT: sub a2, a4, s1
|
||||
; RV32IA-NEXT: bne a0, a1, .LBB7_1
|
||||
; RV32IA-NEXT: # %bb.4: # in Loop: Header=BB7_3 Depth=1
|
||||
; RV32IA-NEXT: sltu a2, a4, a1
|
||||
; RV32IA-NEXT: sltu a3, a4, a2
|
||||
; RV32IA-NEXT: j .LBB7_2
|
||||
; RV32IA-NEXT: .LBB7_5: # %atomicrmw.end
|
||||
; RV32IA-NEXT: mv a0, a4
|
||||
; RV32IA-NEXT: mv a1, a5
|
||||
; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -1292,25 +1324,27 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
|
||||
; RV64I-NEXT: .cfi_offset ra, -8
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: ld a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_8
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: sub a0, a3, s1
|
||||
; RV64I-NEXT: sltu a1, a3, a0
|
||||
; RV64I-NEXT: addi a1, a1, -1
|
||||
; RV64I-NEXT: and a2, a1, a0
|
||||
; RV64I-NEXT: sd a3, 0(sp)
|
||||
; RV64I-NEXT: sub a0, a1, s0
|
||||
; RV64I-NEXT: sltu a2, a1, a0
|
||||
; RV64I-NEXT: addi a2, a2, -1
|
||||
; RV64I-NEXT: and a2, a2, a0
|
||||
; RV64I-NEXT: sd a1, 0(sp)
|
||||
; RV64I-NEXT: mv a1, sp
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_8
|
||||
; RV64I-NEXT: ld a3, 0(sp)
|
||||
; RV64I-NEXT: ld a1, 0(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB7_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
||||
|
||||
@@ -24,26 +24,29 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
|
||||
; RV32I-NEXT: .cfi_offset ra, -4
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lbu a3, 0(a0)
|
||||
; RV32I-NEXT: zext.b s1, a1
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_1
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: zext.b s1, s1
|
||||
; RV32I-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: addi a0, a3, 1
|
||||
; RV32I-NEXT: zext.b a1, a3
|
||||
; RV32I-NEXT: sltu a1, a1, s1
|
||||
; RV32I-NEXT: neg a2, a1
|
||||
; RV32I-NEXT: addi a0, a1, 1
|
||||
; RV32I-NEXT: zext.b a2, a1
|
||||
; RV32I-NEXT: sltu a2, a2, s1
|
||||
; RV32I-NEXT: neg a2, a2
|
||||
; RV32I-NEXT: and a2, a2, a0
|
||||
; RV32I-NEXT: sb a3, 3(sp)
|
||||
; RV32I-NEXT: sb a1, 3(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 3
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_1
|
||||
; RV32I-NEXT: lbu a3, 3(sp)
|
||||
; RV32I-NEXT: lbu a1, 3(sp)
|
||||
; RV32I-NEXT: beqz a0, .LBB0_1
|
||||
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
|
||||
@@ -102,26 +105,29 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
|
||||
; RV64I-NEXT: .cfi_offset ra, -8
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lbu a3, 0(a0)
|
||||
; RV64I-NEXT: zext.b s1, a1
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_1
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: zext.b s1, s1
|
||||
; RV64I-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: addi a0, a3, 1
|
||||
; RV64I-NEXT: zext.b a1, a3
|
||||
; RV64I-NEXT: sltu a1, a1, s1
|
||||
; RV64I-NEXT: neg a2, a1
|
||||
; RV64I-NEXT: addi a0, a1, 1
|
||||
; RV64I-NEXT: zext.b a2, a1
|
||||
; RV64I-NEXT: sltu a2, a2, s1
|
||||
; RV64I-NEXT: neg a2, a2
|
||||
; RV64I-NEXT: and a2, a2, a0
|
||||
; RV64I-NEXT: sb a3, 7(sp)
|
||||
; RV64I-NEXT: sb a1, 7(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 7
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_1
|
||||
; RV64I-NEXT: lbu a3, 7(sp)
|
||||
; RV64I-NEXT: lbu a1, 7(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB0_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
||||
@@ -186,28 +192,31 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: .cfi_offset s2, -16
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lhu a3, 0(a0)
|
||||
; RV32I-NEXT: lui s1, 16
|
||||
; RV32I-NEXT: addi s1, s1, -1
|
||||
; RV32I-NEXT: and s2, a1, s1
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_2
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: lui s2, 16
|
||||
; RV32I-NEXT: addi s2, s2, -1
|
||||
; RV32I-NEXT: and s1, s1, s2
|
||||
; RV32I-NEXT: .LBB1_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: and a0, a3, s1
|
||||
; RV32I-NEXT: addi a1, a3, 1
|
||||
; RV32I-NEXT: sltu a0, a0, s2
|
||||
; RV32I-NEXT: neg a2, a0
|
||||
; RV32I-NEXT: and a2, a2, a1
|
||||
; RV32I-NEXT: sh a3, 14(sp)
|
||||
; RV32I-NEXT: and a0, a1, s2
|
||||
; RV32I-NEXT: addi a2, a1, 1
|
||||
; RV32I-NEXT: sltu a0, a0, s1
|
||||
; RV32I-NEXT: neg a0, a0
|
||||
; RV32I-NEXT: and a2, a0, a2
|
||||
; RV32I-NEXT: sh a1, 14(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 14
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_2
|
||||
; RV32I-NEXT: lh a3, 14(sp)
|
||||
; RV32I-NEXT: lh a1, 14(sp)
|
||||
; RV32I-NEXT: beqz a0, .LBB1_1
|
||||
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -271,28 +280,31 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: .cfi_offset s2, -32
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lhu a3, 0(a0)
|
||||
; RV64I-NEXT: lui s1, 16
|
||||
; RV64I-NEXT: addi s1, s1, -1
|
||||
; RV64I-NEXT: and s2, a1, s1
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_2
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: lui s2, 16
|
||||
; RV64I-NEXT: addi s2, s2, -1
|
||||
; RV64I-NEXT: and s1, s1, s2
|
||||
; RV64I-NEXT: .LBB1_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: and a0, a3, s1
|
||||
; RV64I-NEXT: addi a1, a3, 1
|
||||
; RV64I-NEXT: sltu a0, a0, s2
|
||||
; RV64I-NEXT: neg a2, a0
|
||||
; RV64I-NEXT: and a2, a2, a1
|
||||
; RV64I-NEXT: sh a3, 14(sp)
|
||||
; RV64I-NEXT: and a0, a1, s2
|
||||
; RV64I-NEXT: addi a2, a1, 1
|
||||
; RV64I-NEXT: sltu a0, a0, s1
|
||||
; RV64I-NEXT: neg a0, a0
|
||||
; RV64I-NEXT: and a2, a0, a2
|
||||
; RV64I-NEXT: sh a1, 14(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 14
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_2
|
||||
; RV64I-NEXT: lh a3, 14(sp)
|
||||
; RV64I-NEXT: lh a1, 14(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB1_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
|
||||
@@ -358,25 +370,27 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
|
||||
; RV32I-NEXT: .cfi_offset ra, -4
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lw a3, 0(a0)
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s0, a1
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_4
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: addi a0, a3, 1
|
||||
; RV32I-NEXT: sltu a1, a3, s1
|
||||
; RV32I-NEXT: neg a2, a1
|
||||
; RV32I-NEXT: addi a0, a1, 1
|
||||
; RV32I-NEXT: sltu a2, a1, s0
|
||||
; RV32I-NEXT: neg a2, a2
|
||||
; RV32I-NEXT: and a2, a2, a0
|
||||
; RV32I-NEXT: sw a3, 0(sp)
|
||||
; RV32I-NEXT: sw a1, 0(sp)
|
||||
; RV32I-NEXT: mv a1, sp
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_4
|
||||
; RV32I-NEXT: lw a3, 0(sp)
|
||||
; RV32I-NEXT: lw a1, 0(sp)
|
||||
; RV32I-NEXT: beqz a0, .LBB2_1
|
||||
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
|
||||
@@ -422,25 +436,29 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
|
||||
; RV64I-NEXT: .cfi_offset ra, -8
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lw a3, 0(a0)
|
||||
; RV64I-NEXT: sext.w s1, a1
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_4
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: sext.w s1, s1
|
||||
; RV64I-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: addiw a0, a3, 1
|
||||
; RV64I-NEXT: sltu a1, a3, s1
|
||||
; RV64I-NEXT: neg a2, a1
|
||||
; RV64I-NEXT: addiw a0, a1, 1
|
||||
; RV64I-NEXT: sext.w a2, a1
|
||||
; RV64I-NEXT: sltu a2, a2, s1
|
||||
; RV64I-NEXT: neg a2, a2
|
||||
; RV64I-NEXT: and a2, a2, a0
|
||||
; RV64I-NEXT: sw a3, 4(sp)
|
||||
; RV64I-NEXT: sw a1, 4(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 4
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_4
|
||||
; RV64I-NEXT: lw a3, 4(sp)
|
||||
; RV64I-NEXT: lw a1, 4(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB2_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
||||
@@ -494,41 +512,41 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: .cfi_offset s2, -16
|
||||
; RV32I-NEXT: mv s0, a2
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: lw a4, 0(a0)
|
||||
; RV32I-NEXT: lw a5, 4(a0)
|
||||
; RV32I-NEXT: mv s2, a1
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s2, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_8
|
||||
; RV32I-NEXT: mv a4, a0
|
||||
; RV32I-NEXT: j .LBB3_3
|
||||
; RV32I-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32I-NEXT: sltu a0, a5, s0
|
||||
; RV32I-NEXT: sltu a0, a1, s0
|
||||
; RV32I-NEXT: .LBB3_2: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32I-NEXT: addi a1, a4, 1
|
||||
; RV32I-NEXT: addi a2, a4, 1
|
||||
; RV32I-NEXT: neg a0, a0
|
||||
; RV32I-NEXT: seqz a3, a1
|
||||
; RV32I-NEXT: and a2, a0, a1
|
||||
; RV32I-NEXT: add a3, a5, a3
|
||||
; RV32I-NEXT: seqz a3, a2
|
||||
; RV32I-NEXT: and a2, a0, a2
|
||||
; RV32I-NEXT: add a3, a1, a3
|
||||
; RV32I-NEXT: and a3, a0, a3
|
||||
; RV32I-NEXT: sw a4, 8(sp)
|
||||
; RV32I-NEXT: sw a5, 12(sp)
|
||||
; RV32I-NEXT: sw a1, 12(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 8
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: li a5, 5
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: mv a0, s2
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32I-NEXT: lw a4, 8(sp)
|
||||
; RV32I-NEXT: lw a5, 12(sp)
|
||||
; RV32I-NEXT: lw a1, 12(sp)
|
||||
; RV32I-NEXT: bnez a0, .LBB3_5
|
||||
; RV32I-NEXT: .LBB3_3: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: bne a5, s0, .LBB3_1
|
||||
; RV32I-NEXT: bne a1, s0, .LBB3_1
|
||||
; RV32I-NEXT: # %bb.4: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32I-NEXT: sltu a0, a4, s2
|
||||
; RV32I-NEXT: sltu a0, a4, s1
|
||||
; RV32I-NEXT: j .LBB3_2
|
||||
; RV32I-NEXT: .LBB3_5: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a4
|
||||
; RV32I-NEXT: mv a1, a5
|
||||
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -554,41 +572,41 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
|
||||
; RV32IA-NEXT: .cfi_offset s1, -12
|
||||
; RV32IA-NEXT: .cfi_offset s2, -16
|
||||
; RV32IA-NEXT: mv s0, a2
|
||||
; RV32IA-NEXT: mv s1, a0
|
||||
; RV32IA-NEXT: lw a4, 0(a0)
|
||||
; RV32IA-NEXT: lw a5, 4(a0)
|
||||
; RV32IA-NEXT: mv s2, a1
|
||||
; RV32IA-NEXT: mv s1, a1
|
||||
; RV32IA-NEXT: mv s2, a0
|
||||
; RV32IA-NEXT: li a1, 0
|
||||
; RV32IA-NEXT: call __atomic_load_8
|
||||
; RV32IA-NEXT: mv a4, a0
|
||||
; RV32IA-NEXT: j .LBB3_3
|
||||
; RV32IA-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32IA-NEXT: sltu a0, a5, s0
|
||||
; RV32IA-NEXT: sltu a0, a1, s0
|
||||
; RV32IA-NEXT: .LBB3_2: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32IA-NEXT: addi a1, a4, 1
|
||||
; RV32IA-NEXT: addi a2, a4, 1
|
||||
; RV32IA-NEXT: neg a0, a0
|
||||
; RV32IA-NEXT: seqz a3, a1
|
||||
; RV32IA-NEXT: and a2, a0, a1
|
||||
; RV32IA-NEXT: add a3, a5, a3
|
||||
; RV32IA-NEXT: seqz a3, a2
|
||||
; RV32IA-NEXT: and a2, a0, a2
|
||||
; RV32IA-NEXT: add a3, a1, a3
|
||||
; RV32IA-NEXT: and a3, a0, a3
|
||||
; RV32IA-NEXT: sw a4, 8(sp)
|
||||
; RV32IA-NEXT: sw a5, 12(sp)
|
||||
; RV32IA-NEXT: sw a1, 12(sp)
|
||||
; RV32IA-NEXT: addi a1, sp, 8
|
||||
; RV32IA-NEXT: li a4, 5
|
||||
; RV32IA-NEXT: li a5, 5
|
||||
; RV32IA-NEXT: mv a0, s1
|
||||
; RV32IA-NEXT: mv a0, s2
|
||||
; RV32IA-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32IA-NEXT: lw a4, 8(sp)
|
||||
; RV32IA-NEXT: lw a5, 12(sp)
|
||||
; RV32IA-NEXT: lw a1, 12(sp)
|
||||
; RV32IA-NEXT: bnez a0, .LBB3_5
|
||||
; RV32IA-NEXT: .LBB3_3: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32IA-NEXT: bne a5, s0, .LBB3_1
|
||||
; RV32IA-NEXT: bne a1, s0, .LBB3_1
|
||||
; RV32IA-NEXT: # %bb.4: # in Loop: Header=BB3_3 Depth=1
|
||||
; RV32IA-NEXT: sltu a0, a4, s2
|
||||
; RV32IA-NEXT: sltu a0, a4, s1
|
||||
; RV32IA-NEXT: j .LBB3_2
|
||||
; RV32IA-NEXT: .LBB3_5: # %atomicrmw.end
|
||||
; RV32IA-NEXT: mv a0, a4
|
||||
; RV32IA-NEXT: mv a1, a5
|
||||
; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -611,25 +629,27 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
|
||||
; RV64I-NEXT: .cfi_offset ra, -8
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: ld a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_8
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: addi a0, a3, 1
|
||||
; RV64I-NEXT: sltu a1, a3, s1
|
||||
; RV64I-NEXT: neg a2, a1
|
||||
; RV64I-NEXT: addi a0, a1, 1
|
||||
; RV64I-NEXT: sltu a2, a1, s0
|
||||
; RV64I-NEXT: neg a2, a2
|
||||
; RV64I-NEXT: and a2, a2, a0
|
||||
; RV64I-NEXT: sd a3, 0(sp)
|
||||
; RV64I-NEXT: sd a1, 0(sp)
|
||||
; RV64I-NEXT: mv a1, sp
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_8
|
||||
; RV64I-NEXT: ld a3, 0(sp)
|
||||
; RV64I-NEXT: ld a1, 0(sp)
|
||||
; RV64I-NEXT: beqz a0, .LBB3_1
|
||||
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
||||
@@ -681,35 +701,37 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: .cfi_offset s2, -16
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lbu a3, 0(a0)
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: zext.b s2, a1
|
||||
; RV32I-NEXT: mv s0, a1
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_1
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: zext.b s2, s0
|
||||
; RV32I-NEXT: j .LBB4_2
|
||||
; RV32I-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV32I-NEXT: sb a3, 15(sp)
|
||||
; RV32I-NEXT: sb a1, 15(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 15
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_1
|
||||
; RV32I-NEXT: lbu a3, 15(sp)
|
||||
; RV32I-NEXT: lbu a1, 15(sp)
|
||||
; RV32I-NEXT: bnez a0, .LBB4_4
|
||||
; RV32I-NEXT: .LBB4_2: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: zext.b a0, a3
|
||||
; RV32I-NEXT: seqz a1, a0
|
||||
; RV32I-NEXT: zext.b a0, a1
|
||||
; RV32I-NEXT: seqz a2, a0
|
||||
; RV32I-NEXT: sltu a0, s2, a0
|
||||
; RV32I-NEXT: or a0, a1, a0
|
||||
; RV32I-NEXT: mv a2, s1
|
||||
; RV32I-NEXT: or a0, a2, a0
|
||||
; RV32I-NEXT: mv a2, s0
|
||||
; RV32I-NEXT: bnez a0, .LBB4_1
|
||||
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV32I-NEXT: addi a2, a3, -1
|
||||
; RV32I-NEXT: addi a2, a1, -1
|
||||
; RV32I-NEXT: j .LBB4_1
|
||||
; RV32I-NEXT: .LBB4_4: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -782,35 +804,37 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: .cfi_offset s2, -32
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lbu a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: zext.b s2, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_1
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: zext.b s2, s0
|
||||
; RV64I-NEXT: j .LBB4_2
|
||||
; RV64I-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV64I-NEXT: sb a3, 15(sp)
|
||||
; RV64I-NEXT: sb a1, 15(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 15
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_1
|
||||
; RV64I-NEXT: lbu a3, 15(sp)
|
||||
; RV64I-NEXT: lbu a1, 15(sp)
|
||||
; RV64I-NEXT: bnez a0, .LBB4_4
|
||||
; RV64I-NEXT: .LBB4_2: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: zext.b a0, a3
|
||||
; RV64I-NEXT: seqz a1, a0
|
||||
; RV64I-NEXT: zext.b a0, a1
|
||||
; RV64I-NEXT: seqz a2, a0
|
||||
; RV64I-NEXT: sltu a0, s2, a0
|
||||
; RV64I-NEXT: or a0, a1, a0
|
||||
; RV64I-NEXT: mv a2, s1
|
||||
; RV64I-NEXT: or a0, a2, a0
|
||||
; RV64I-NEXT: mv a2, s0
|
||||
; RV64I-NEXT: bnez a0, .LBB4_1
|
||||
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV64I-NEXT: # in Loop: Header=BB4_2 Depth=1
|
||||
; RV64I-NEXT: addi a2, a3, -1
|
||||
; RV64I-NEXT: addi a2, a1, -1
|
||||
; RV64I-NEXT: j .LBB4_1
|
||||
; RV64I-NEXT: .LBB4_4: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
|
||||
@@ -891,7 +915,9 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s3, -20
|
||||
; RV32I-NEXT: mv s0, a1
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: lhu a1, 0(a0)
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_2
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: lui s2, 16
|
||||
; RV32I-NEXT: addi s2, s2, -1
|
||||
; RV32I-NEXT: and s3, s0, s2
|
||||
@@ -999,7 +1025,9 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
|
||||
; RV64I-NEXT: .cfi_offset s3, -40
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: lhu a1, 0(a0)
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_2
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: lui s2, 16
|
||||
; RV64I-NEXT: addi s2, s2, -1
|
||||
; RV64I-NEXT: and s3, s0, s2
|
||||
@@ -1105,33 +1133,35 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
|
||||
; RV32I-NEXT: .cfi_offset ra, -4
|
||||
; RV32I-NEXT: .cfi_offset s0, -8
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: mv s0, a0
|
||||
; RV32I-NEXT: lw a3, 0(a0)
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s0, a1
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_4
|
||||
; RV32I-NEXT: mv a1, a0
|
||||
; RV32I-NEXT: j .LBB6_2
|
||||
; RV32I-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV32I-NEXT: sw a3, 0(sp)
|
||||
; RV32I-NEXT: sw a1, 0(sp)
|
||||
; RV32I-NEXT: mv a1, sp
|
||||
; RV32I-NEXT: li a3, 5
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: mv a0, s0
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_4
|
||||
; RV32I-NEXT: lw a3, 0(sp)
|
||||
; RV32I-NEXT: lw a1, 0(sp)
|
||||
; RV32I-NEXT: bnez a0, .LBB6_4
|
||||
; RV32I-NEXT: .LBB6_2: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: seqz a0, a3
|
||||
; RV32I-NEXT: sltu a1, s1, a3
|
||||
; RV32I-NEXT: or a0, a0, a1
|
||||
; RV32I-NEXT: mv a2, s1
|
||||
; RV32I-NEXT: seqz a0, a1
|
||||
; RV32I-NEXT: sltu a2, s0, a1
|
||||
; RV32I-NEXT: or a0, a0, a2
|
||||
; RV32I-NEXT: mv a2, s0
|
||||
; RV32I-NEXT: bnez a0, .LBB6_1
|
||||
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV32I-NEXT: addi a2, a3, -1
|
||||
; RV32I-NEXT: addi a2, a1, -1
|
||||
; RV32I-NEXT: j .LBB6_1
|
||||
; RV32I-NEXT: .LBB6_4: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a3
|
||||
; RV32I-NEXT: mv a0, a1
|
||||
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
|
||||
@@ -1189,34 +1219,37 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: .cfi_offset s2, -32
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: lw a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: sext.w s2, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_4
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: sext.w s2, s0
|
||||
; RV64I-NEXT: j .LBB6_2
|
||||
; RV64I-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV64I-NEXT: sw a3, 12(sp)
|
||||
; RV64I-NEXT: sw a1, 12(sp)
|
||||
; RV64I-NEXT: addi a1, sp, 12
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_4
|
||||
; RV64I-NEXT: lw a3, 12(sp)
|
||||
; RV64I-NEXT: lw a1, 12(sp)
|
||||
; RV64I-NEXT: bnez a0, .LBB6_4
|
||||
; RV64I-NEXT: .LBB6_2: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: seqz a0, a3
|
||||
; RV64I-NEXT: sltu a1, s2, a3
|
||||
; RV64I-NEXT: or a0, a0, a1
|
||||
; RV64I-NEXT: mv a2, s1
|
||||
; RV64I-NEXT: sext.w a0, a1
|
||||
; RV64I-NEXT: seqz a2, a0
|
||||
; RV64I-NEXT: sltu a0, s2, a0
|
||||
; RV64I-NEXT: or a0, a2, a0
|
||||
; RV64I-NEXT: mv a2, s0
|
||||
; RV64I-NEXT: bnez a0, .LBB6_1
|
||||
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV64I-NEXT: # in Loop: Header=BB6_2 Depth=1
|
||||
; RV64I-NEXT: addiw a2, a3, -1
|
||||
; RV64I-NEXT: addiw a2, a1, -1
|
||||
; RV64I-NEXT: j .LBB6_1
|
||||
; RV64I-NEXT: .LBB6_4: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
|
||||
@@ -1282,49 +1315,49 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
|
||||
; RV32I-NEXT: .cfi_offset s1, -12
|
||||
; RV32I-NEXT: .cfi_offset s2, -16
|
||||
; RV32I-NEXT: mv s0, a2
|
||||
; RV32I-NEXT: mv s1, a0
|
||||
; RV32I-NEXT: lw a5, 0(a0)
|
||||
; RV32I-NEXT: lw a4, 4(a0)
|
||||
; RV32I-NEXT: mv s2, a1
|
||||
; RV32I-NEXT: mv s1, a1
|
||||
; RV32I-NEXT: mv s2, a0
|
||||
; RV32I-NEXT: li a1, 0
|
||||
; RV32I-NEXT: call __atomic_load_8
|
||||
; RV32I-NEXT: mv a4, a0
|
||||
; RV32I-NEXT: j .LBB7_2
|
||||
; RV32I-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32I-NEXT: sw a5, 8(sp)
|
||||
; RV32I-NEXT: sw a4, 12(sp)
|
||||
; RV32I-NEXT: sw a4, 8(sp)
|
||||
; RV32I-NEXT: sw a1, 12(sp)
|
||||
; RV32I-NEXT: addi a1, sp, 8
|
||||
; RV32I-NEXT: li a4, 5
|
||||
; RV32I-NEXT: li a5, 5
|
||||
; RV32I-NEXT: mv a0, s1
|
||||
; RV32I-NEXT: mv a0, s2
|
||||
; RV32I-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32I-NEXT: lw a5, 8(sp)
|
||||
; RV32I-NEXT: lw a4, 12(sp)
|
||||
; RV32I-NEXT: lw a4, 8(sp)
|
||||
; RV32I-NEXT: lw a1, 12(sp)
|
||||
; RV32I-NEXT: bnez a0, .LBB7_7
|
||||
; RV32I-NEXT: .LBB7_2: # %atomicrmw.start
|
||||
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32I-NEXT: beq a4, s0, .LBB7_4
|
||||
; RV32I-NEXT: beq a1, s0, .LBB7_4
|
||||
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32I-NEXT: sltu a0, s0, a4
|
||||
; RV32I-NEXT: sltu a0, s0, a1
|
||||
; RV32I-NEXT: j .LBB7_5
|
||||
; RV32I-NEXT: .LBB7_4: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32I-NEXT: sltu a0, s2, a5
|
||||
; RV32I-NEXT: sltu a0, s1, a4
|
||||
; RV32I-NEXT: .LBB7_5: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32I-NEXT: or a1, a5, a4
|
||||
; RV32I-NEXT: seqz a1, a1
|
||||
; RV32I-NEXT: or a0, a1, a0
|
||||
; RV32I-NEXT: mv a2, s2
|
||||
; RV32I-NEXT: or a2, a4, a1
|
||||
; RV32I-NEXT: seqz a2, a2
|
||||
; RV32I-NEXT: or a0, a2, a0
|
||||
; RV32I-NEXT: mv a2, s1
|
||||
; RV32I-NEXT: mv a3, s0
|
||||
; RV32I-NEXT: bnez a0, .LBB7_1
|
||||
; RV32I-NEXT: # %bb.6: # %atomicrmw.start
|
||||
; RV32I-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32I-NEXT: seqz a0, a5
|
||||
; RV32I-NEXT: sub a3, a4, a0
|
||||
; RV32I-NEXT: addi a2, a5, -1
|
||||
; RV32I-NEXT: seqz a0, a4
|
||||
; RV32I-NEXT: sub a3, a1, a0
|
||||
; RV32I-NEXT: addi a2, a4, -1
|
||||
; RV32I-NEXT: j .LBB7_1
|
||||
; RV32I-NEXT: .LBB7_7: # %atomicrmw.end
|
||||
; RV32I-NEXT: mv a0, a5
|
||||
; RV32I-NEXT: mv a1, a4
|
||||
; RV32I-NEXT: mv a0, a4
|
||||
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -1350,49 +1383,49 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
|
||||
; RV32IA-NEXT: .cfi_offset s1, -12
|
||||
; RV32IA-NEXT: .cfi_offset s2, -16
|
||||
; RV32IA-NEXT: mv s0, a2
|
||||
; RV32IA-NEXT: mv s1, a0
|
||||
; RV32IA-NEXT: lw a5, 0(a0)
|
||||
; RV32IA-NEXT: lw a4, 4(a0)
|
||||
; RV32IA-NEXT: mv s2, a1
|
||||
; RV32IA-NEXT: mv s1, a1
|
||||
; RV32IA-NEXT: mv s2, a0
|
||||
; RV32IA-NEXT: li a1, 0
|
||||
; RV32IA-NEXT: call __atomic_load_8
|
||||
; RV32IA-NEXT: mv a4, a0
|
||||
; RV32IA-NEXT: j .LBB7_2
|
||||
; RV32IA-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IA-NEXT: sw a5, 8(sp)
|
||||
; RV32IA-NEXT: sw a4, 12(sp)
|
||||
; RV32IA-NEXT: sw a4, 8(sp)
|
||||
; RV32IA-NEXT: sw a1, 12(sp)
|
||||
; RV32IA-NEXT: addi a1, sp, 8
|
||||
; RV32IA-NEXT: li a4, 5
|
||||
; RV32IA-NEXT: li a5, 5
|
||||
; RV32IA-NEXT: mv a0, s1
|
||||
; RV32IA-NEXT: mv a0, s2
|
||||
; RV32IA-NEXT: call __atomic_compare_exchange_8
|
||||
; RV32IA-NEXT: lw a5, 8(sp)
|
||||
; RV32IA-NEXT: lw a4, 12(sp)
|
||||
; RV32IA-NEXT: lw a4, 8(sp)
|
||||
; RV32IA-NEXT: lw a1, 12(sp)
|
||||
; RV32IA-NEXT: bnez a0, .LBB7_7
|
||||
; RV32IA-NEXT: .LBB7_2: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32IA-NEXT: beq a4, s0, .LBB7_4
|
||||
; RV32IA-NEXT: beq a1, s0, .LBB7_4
|
||||
; RV32IA-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IA-NEXT: sltu a0, s0, a4
|
||||
; RV32IA-NEXT: sltu a0, s0, a1
|
||||
; RV32IA-NEXT: j .LBB7_5
|
||||
; RV32IA-NEXT: .LBB7_4: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IA-NEXT: sltu a0, s2, a5
|
||||
; RV32IA-NEXT: sltu a0, s1, a4
|
||||
; RV32IA-NEXT: .LBB7_5: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IA-NEXT: or a1, a5, a4
|
||||
; RV32IA-NEXT: seqz a1, a1
|
||||
; RV32IA-NEXT: or a0, a1, a0
|
||||
; RV32IA-NEXT: mv a2, s2
|
||||
; RV32IA-NEXT: or a2, a4, a1
|
||||
; RV32IA-NEXT: seqz a2, a2
|
||||
; RV32IA-NEXT: or a0, a2, a0
|
||||
; RV32IA-NEXT: mv a2, s1
|
||||
; RV32IA-NEXT: mv a3, s0
|
||||
; RV32IA-NEXT: bnez a0, .LBB7_1
|
||||
; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
|
||||
; RV32IA-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV32IA-NEXT: seqz a0, a5
|
||||
; RV32IA-NEXT: sub a3, a4, a0
|
||||
; RV32IA-NEXT: addi a2, a5, -1
|
||||
; RV32IA-NEXT: seqz a0, a4
|
||||
; RV32IA-NEXT: sub a3, a1, a0
|
||||
; RV32IA-NEXT: addi a2, a4, -1
|
||||
; RV32IA-NEXT: j .LBB7_1
|
||||
; RV32IA-NEXT: .LBB7_7: # %atomicrmw.end
|
||||
; RV32IA-NEXT: mv a0, a5
|
||||
; RV32IA-NEXT: mv a1, a4
|
||||
; RV32IA-NEXT: mv a0, a4
|
||||
; RV32IA-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
|
||||
; RV32IA-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
|
||||
; RV32IA-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
|
||||
@@ -1415,33 +1448,35 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
|
||||
; RV64I-NEXT: .cfi_offset ra, -8
|
||||
; RV64I-NEXT: .cfi_offset s0, -16
|
||||
; RV64I-NEXT: .cfi_offset s1, -24
|
||||
; RV64I-NEXT: mv s0, a0
|
||||
; RV64I-NEXT: ld a3, 0(a0)
|
||||
; RV64I-NEXT: mv s1, a1
|
||||
; RV64I-NEXT: mv s0, a1
|
||||
; RV64I-NEXT: mv s1, a0
|
||||
; RV64I-NEXT: li a1, 0
|
||||
; RV64I-NEXT: call __atomic_load_8
|
||||
; RV64I-NEXT: mv a1, a0
|
||||
; RV64I-NEXT: j .LBB7_2
|
||||
; RV64I-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; RV64I-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV64I-NEXT: sd a3, 0(sp)
|
||||
; RV64I-NEXT: sd a1, 0(sp)
|
||||
; RV64I-NEXT: mv a1, sp
|
||||
; RV64I-NEXT: li a3, 5
|
||||
; RV64I-NEXT: li a4, 5
|
||||
; RV64I-NEXT: mv a0, s0
|
||||
; RV64I-NEXT: mv a0, s1
|
||||
; RV64I-NEXT: call __atomic_compare_exchange_8
|
||||
; RV64I-NEXT: ld a3, 0(sp)
|
||||
; RV64I-NEXT: ld a1, 0(sp)
|
||||
; RV64I-NEXT: bnez a0, .LBB7_4
|
||||
; RV64I-NEXT: .LBB7_2: # %atomicrmw.start
|
||||
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64I-NEXT: seqz a0, a3
|
||||
; RV64I-NEXT: sltu a1, s1, a3
|
||||
; RV64I-NEXT: or a0, a0, a1
|
||||
; RV64I-NEXT: mv a2, s1
|
||||
; RV64I-NEXT: seqz a0, a1
|
||||
; RV64I-NEXT: sltu a2, s0, a1
|
||||
; RV64I-NEXT: or a0, a0, a2
|
||||
; RV64I-NEXT: mv a2, s0
|
||||
; RV64I-NEXT: bnez a0, .LBB7_1
|
||||
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV64I-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
; RV64I-NEXT: addi a2, a3, -1
|
||||
; RV64I-NEXT: addi a2, a1, -1
|
||||
; RV64I-NEXT: j .LBB7_1
|
||||
; RV64I-NEXT: .LBB7_4: # %atomicrmw.end
|
||||
; RV64I-NEXT: mv a0, a3
|
||||
; RV64I-NEXT: mv a0, a1
|
||||
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
||||
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
||||
|
||||
@@ -1357,7 +1357,9 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV32-NO-ATOMIC-NEXT: lw a1, 0(a0)
|
||||
; RV32-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV32-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV32-NO-ATOMIC-NEXT: j .LBB23_2
|
||||
; RV32-NO-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start
|
||||
; RV32-NO-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
|
||||
@@ -1410,7 +1412,9 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: lw a1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV64-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: j .LBB23_2
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
|
||||
@@ -1424,9 +1428,10 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB23_4
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: li a0, 1
|
||||
; RV64-NO-ATOMIC-NEXT: sext.w a0, a1
|
||||
; RV64-NO-ATOMIC-NEXT: li a3, 1
|
||||
; RV64-NO-ATOMIC-NEXT: mv a2, a1
|
||||
; RV64-NO-ATOMIC-NEXT: blt a0, a1, .LBB23_1
|
||||
; RV64-NO-ATOMIC-NEXT: blt a3, a0, .LBB23_1
|
||||
; RV64-NO-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: li a2, 1
|
||||
@@ -1469,7 +1474,9 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NO-ATOMIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV32-NO-ATOMIC-NEXT: lw a1, 0(a0)
|
||||
; RV32-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV32-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV32-NO-ATOMIC-NEXT: li s1, 2
|
||||
; RV32-NO-ATOMIC-NEXT: j .LBB24_2
|
||||
; RV32-NO-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start
|
||||
@@ -1525,7 +1532,9 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: lw a1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV64-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: li s1, 2
|
||||
; RV64-NO-ATOMIC-NEXT: j .LBB24_2
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start
|
||||
@@ -1540,8 +1549,9 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB24_4
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: sext.w a0, a1
|
||||
; RV64-NO-ATOMIC-NEXT: mv a2, a1
|
||||
; RV64-NO-ATOMIC-NEXT: blt a1, s1, .LBB24_1
|
||||
; RV64-NO-ATOMIC-NEXT: blt a0, s1, .LBB24_1
|
||||
; RV64-NO-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: li a2, 1
|
||||
@@ -1584,7 +1594,9 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV32-NO-ATOMIC-NEXT: lw a1, 0(a0)
|
||||
; RV32-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV32-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV32-NO-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start
|
||||
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NO-ATOMIC-NEXT: seqz a2, a1
|
||||
@@ -1630,7 +1642,9 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: lw a1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV64-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: j .LBB25_2
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
|
||||
@@ -1644,9 +1658,10 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB25_4
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: li a0, 1
|
||||
; RV64-NO-ATOMIC-NEXT: sext.w a0, a1
|
||||
; RV64-NO-ATOMIC-NEXT: li a3, 1
|
||||
; RV64-NO-ATOMIC-NEXT: mv a2, a1
|
||||
; RV64-NO-ATOMIC-NEXT: bltu a0, a1, .LBB25_1
|
||||
; RV64-NO-ATOMIC-NEXT: bltu a3, a0, .LBB25_1
|
||||
; RV64-NO-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: li a2, 1
|
||||
@@ -1689,7 +1704,9 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NO-ATOMIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV32-NO-ATOMIC-NEXT: lw a1, 0(a0)
|
||||
; RV32-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV32-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV32-NO-ATOMIC-NEXT: li s1, 2
|
||||
; RV32-NO-ATOMIC-NEXT: j .LBB26_2
|
||||
; RV32-NO-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start
|
||||
@@ -1745,7 +1762,9 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: lw a1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV64-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: li s1, 2
|
||||
; RV64-NO-ATOMIC-NEXT: j .LBB26_2
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start
|
||||
@@ -1760,8 +1779,9 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB26_4
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: sext.w a0, a1
|
||||
; RV64-NO-ATOMIC-NEXT: mv a2, a1
|
||||
; RV64-NO-ATOMIC-NEXT: bltu a1, s1, .LBB26_1
|
||||
; RV64-NO-ATOMIC-NEXT: bltu a0, s1, .LBB26_1
|
||||
; RV64-NO-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: li a2, 1
|
||||
@@ -1871,7 +1891,9 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NO-ATOMIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV32-NO-ATOMIC-NEXT: lw s1, 0(a0)
|
||||
; RV32-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV32-NO-ATOMIC-NEXT: mv s1, a0
|
||||
; RV32-NO-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start
|
||||
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NO-ATOMIC-NEXT: lui a1, 260096
|
||||
@@ -1951,7 +1973,9 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: lw s1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV64-NO-ATOMIC-NEXT: mv s1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: lui a1, 260096
|
||||
@@ -2045,7 +2069,9 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NO-ATOMIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV32-NO-ATOMIC-NEXT: lw s1, 0(a0)
|
||||
; RV32-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV32-NO-ATOMIC-NEXT: mv s1, a0
|
||||
; RV32-NO-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start
|
||||
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NO-ATOMIC-NEXT: lui a1, 784384
|
||||
@@ -2125,7 +2151,9 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: lw s1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV64-NO-ATOMIC-NEXT: mv s1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: lui a1, 784384
|
||||
@@ -2219,7 +2247,9 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NO-ATOMIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV32-NO-ATOMIC-NEXT: lw s1, 0(a0)
|
||||
; RV32-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV32-NO-ATOMIC-NEXT: mv s1, a0
|
||||
; RV32-NO-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start
|
||||
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NO-ATOMIC-NEXT: lui a1, 260096
|
||||
@@ -2299,7 +2329,9 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: lw s1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV64-NO-ATOMIC-NEXT: mv s1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: lui a1, 260096
|
||||
@@ -2393,7 +2425,9 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NO-ATOMIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
|
||||
; RV32-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV32-NO-ATOMIC-NEXT: lw s1, 0(a0)
|
||||
; RV32-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV32-NO-ATOMIC-NEXT: mv s1, a0
|
||||
; RV32-NO-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start
|
||||
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NO-ATOMIC-NEXT: lui a1, 260096
|
||||
@@ -2473,7 +2507,9 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: lw s1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
|
||||
; RV64-NO-ATOMIC-NEXT: mv s1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: lui a1, 260096
|
||||
@@ -3348,8 +3384,9 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: lw a4, 0(a0)
|
||||
; RV32-NEXT: lw a1, 4(a0)
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv a4, a0
|
||||
; RV32-NEXT: j .LBB49_2
|
||||
; RV32-NEXT: .LBB49_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # in Loop: Header=BB49_2 Depth=1
|
||||
@@ -3396,7 +3433,9 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: ld a1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
|
||||
; RV64-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: j .LBB49_2
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB49_1: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1
|
||||
@@ -3453,8 +3492,9 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: lw a4, 0(a0)
|
||||
; RV32-NEXT: lw a1, 4(a0)
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv a4, a0
|
||||
; RV32-NEXT: j .LBB50_2
|
||||
; RV32-NEXT: .LBB50_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # in Loop: Header=BB50_2 Depth=1
|
||||
@@ -3501,7 +3541,9 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: ld a1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
|
||||
; RV64-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: li s1, 2
|
||||
; RV64-NO-ATOMIC-NEXT: j .LBB50_2
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB50_1: # %atomicrmw.start
|
||||
@@ -3560,8 +3602,9 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: lw a4, 0(a0)
|
||||
; RV32-NEXT: lw a1, 4(a0)
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv a4, a0
|
||||
; RV32-NEXT: j .LBB51_2
|
||||
; RV32-NEXT: .LBB51_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # in Loop: Header=BB51_2 Depth=1
|
||||
@@ -3602,7 +3645,9 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: ld a1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
|
||||
; RV64-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB51_1: # %atomicrmw.start
|
||||
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV64-NO-ATOMIC-NEXT: seqz a2, a1
|
||||
@@ -3652,8 +3697,9 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: lw a4, 0(a0)
|
||||
; RV32-NEXT: lw a1, 4(a0)
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv a4, a0
|
||||
; RV32-NEXT: j .LBB52_2
|
||||
; RV32-NEXT: .LBB52_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # in Loop: Header=BB52_2 Depth=1
|
||||
@@ -3694,7 +3740,9 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: ld a1, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
|
||||
; RV64-NO-ATOMIC-NEXT: mv a1, a0
|
||||
; RV64-NO-ATOMIC-NEXT: li s1, 2
|
||||
; RV64-NO-ATOMIC-NEXT: j .LBB52_2
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB52_1: # %atomicrmw.start
|
||||
@@ -3802,8 +3850,10 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: lw s1, 0(a0)
|
||||
; RV32-NEXT: lw s2, 4(a0)
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: mv s2, a1
|
||||
; RV32-NEXT: .LBB54_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: lui a3, 261888
|
||||
@@ -3841,7 +3891,9 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: ld s2, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
|
||||
; RV64-NO-ATOMIC-NEXT: mv s2, a0
|
||||
; RV64-NO-ATOMIC-NEXT: li s1, 1023
|
||||
; RV64-NO-ATOMIC-NEXT: slli s1, s1, 52
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB54_1: # %atomicrmw.start
|
||||
@@ -3937,8 +3989,10 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: lw s1, 0(a0)
|
||||
; RV32-NEXT: lw s2, 4(a0)
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: mv s2, a1
|
||||
; RV32-NEXT: .LBB55_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: lui a3, 786176
|
||||
@@ -3976,7 +4030,9 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: ld s2, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
|
||||
; RV64-NO-ATOMIC-NEXT: mv s2, a0
|
||||
; RV64-NO-ATOMIC-NEXT: li s1, -1025
|
||||
; RV64-NO-ATOMIC-NEXT: slli s1, s1, 52
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB55_1: # %atomicrmw.start
|
||||
@@ -4072,8 +4128,10 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: lw s1, 0(a0)
|
||||
; RV32-NEXT: lw s2, 4(a0)
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: mv s2, a1
|
||||
; RV32-NEXT: .LBB56_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: lui a3, 261888
|
||||
@@ -4111,7 +4169,9 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: ld s2, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
|
||||
; RV64-NO-ATOMIC-NEXT: mv s2, a0
|
||||
; RV64-NO-ATOMIC-NEXT: li s1, 1023
|
||||
; RV64-NO-ATOMIC-NEXT: slli s1, s1, 52
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB56_1: # %atomicrmw.start
|
||||
@@ -4207,8 +4267,10 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: lw s1, 0(a0)
|
||||
; RV32-NEXT: lw s2, 4(a0)
|
||||
; RV32-NEXT: li a1, 0
|
||||
; RV32-NEXT: call __atomic_load_8
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: mv s2, a1
|
||||
; RV32-NEXT: .LBB57_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: lui a3, 261888
|
||||
@@ -4246,7 +4308,9 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
|
||||
; RV64-NO-ATOMIC-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
|
||||
; RV64-NO-ATOMIC-NEXT: mv s0, a0
|
||||
; RV64-NO-ATOMIC-NEXT: ld s2, 0(a0)
|
||||
; RV64-NO-ATOMIC-NEXT: li a1, 0
|
||||
; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
|
||||
; RV64-NO-ATOMIC-NEXT: mv s2, a0
|
||||
; RV64-NO-ATOMIC-NEXT: li s1, 1023
|
||||
; RV64-NO-ATOMIC-NEXT: slli s1, s1, 52
|
||||
; RV64-NO-ATOMIC-NEXT: .LBB57_1: # %atomicrmw.start
|
||||
@@ -4530,12 +4594,16 @@ define i128 @rmw128(ptr %p) nounwind {
|
||||
; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
|
||||
; RV32-NEXT: mv s0, a1
|
||||
; RV32-NEXT: lw a4, 0(a1)
|
||||
; RV32-NEXT: lw a3, 4(a1)
|
||||
; RV32-NEXT: lw a1, 8(a1)
|
||||
; RV32-NEXT: lw a2, 12(s0)
|
||||
; RV32-NEXT: mv s1, a0
|
||||
; RV32-NEXT: mv s1, a1
|
||||
; RV32-NEXT: mv s0, a0
|
||||
; RV32-NEXT: li a0, 16
|
||||
; RV32-NEXT: addi a2, sp, 16
|
||||
; RV32-NEXT: li a3, 0
|
||||
; RV32-NEXT: call __atomic_load
|
||||
; RV32-NEXT: lw a4, 16(sp)
|
||||
; RV32-NEXT: lw a3, 20(sp)
|
||||
; RV32-NEXT: lw a1, 24(sp)
|
||||
; RV32-NEXT: lw a2, 28(sp)
|
||||
; RV32-NEXT: .LBB62_1: # %atomicrmw.start
|
||||
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; RV32-NEXT: addi a0, a4, 1
|
||||
@@ -4559,7 +4627,7 @@ define i128 @rmw128(ptr %p) nounwind {
|
||||
; RV32-NEXT: mv a3, sp
|
||||
; RV32-NEXT: li a4, 5
|
||||
; RV32-NEXT: li a5, 5
|
||||
; RV32-NEXT: mv a1, s0
|
||||
; RV32-NEXT: mv a1, s1
|
||||
; RV32-NEXT: call __atomic_compare_exchange
|
||||
; RV32-NEXT: lw a4, 16(sp)
|
||||
; RV32-NEXT: lw a3, 20(sp)
|
||||
@@ -4567,10 +4635,10 @@ define i128 @rmw128(ptr %p) nounwind {
|
||||
; RV32-NEXT: lw a2, 28(sp)
|
||||
; RV32-NEXT: beqz a0, .LBB62_1
|
||||
; RV32-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; RV32-NEXT: sw a4, 0(s1)
|
||||
; RV32-NEXT: sw a3, 4(s1)
|
||||
; RV32-NEXT: sw a1, 8(s1)
|
||||
; RV32-NEXT: sw a2, 12(s1)
|
||||
; RV32-NEXT: sw a4, 0(s0)
|
||||
; RV32-NEXT: sw a3, 4(s0)
|
||||
; RV32-NEXT: sw a1, 8(s0)
|
||||
; RV32-NEXT: sw a2, 12(s0)
|
||||
; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
|
||||
; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
|
||||
; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
|
||||
|
||||
@@ -121,9 +121,13 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
|
||||
; CHECK-NEXT: .cfi_def_cfa_register %fp
|
||||
; CHECK-NEXT: .cfi_window_save
|
||||
; CHECK-NEXT: .cfi_register %o7, %i7
|
||||
; CHECK-NEXT: ldd [%i0], %g2
|
||||
; CHECK-NEXT: mov %i0, %o0
|
||||
; CHECK-NEXT: call __atomic_load_8
|
||||
; CHECK-NEXT: mov %g0, %o1
|
||||
; CHECK-NEXT: mov %o0, %g2
|
||||
; CHECK-NEXT: add %fp, -8, %i3
|
||||
; CHECK-NEXT: mov 5, %i4
|
||||
; CHECK-NEXT: mov %o1, %g3
|
||||
; CHECK-NEXT: .LBB3_1: ! %atomicrmw.start
|
||||
; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: mov %g0, %i5
|
||||
@@ -282,9 +286,13 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
|
||||
; CHECK-NEXT: .cfi_def_cfa_register %fp
|
||||
; CHECK-NEXT: .cfi_window_save
|
||||
; CHECK-NEXT: .cfi_register %o7, %i7
|
||||
; CHECK-NEXT: ldd [%i0], %g2
|
||||
; CHECK-NEXT: mov %i0, %o0
|
||||
; CHECK-NEXT: call __atomic_load_8
|
||||
; CHECK-NEXT: mov %g0, %o1
|
||||
; CHECK-NEXT: mov %o0, %g2
|
||||
; CHECK-NEXT: add %fp, -8, %i3
|
||||
; CHECK-NEXT: mov 5, %i4
|
||||
; CHECK-NEXT: mov %o1, %g3
|
||||
; CHECK-NEXT: .LBB7_1: ! %atomicrmw.start
|
||||
; CHECK-NEXT: ! =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: mov %g0, %i5
|
||||
|
||||
@@ -6,8 +6,9 @@ define void @f1(ptr %ret, ptr %src, ptr %b) {
|
||||
; CHECK-LABEL: f1:
|
||||
; CHECK: [[FBL:%f[0-9]+]], 0(%r4)
|
||||
; CHECK: [[FBH:%f[0-9]+]], 8(%r4)
|
||||
; CHECK: [[FSL:%f[0-9]+]], 0(%r3)
|
||||
; CHECK: [[FSH:%f[0-9]+]], 8(%r3)
|
||||
; CHECK: lpq [[RIL:%r[0-9]+]], 0(%r3)
|
||||
; CHECK: ldgr [[FSL:%f[0-9]+]], [[RIL]]
|
||||
; CHECK: ldgr [[FSH:%f[0-9]+]],
|
||||
; CHECK: [[LABEL:\.L.+]]:
|
||||
; CHECK: lgdr [[RISH:%r[0-9]+]], [[FSH]]
|
||||
; CHECK: lgdr [[RISL:%r[0-9]+]], [[FSL]]
|
||||
|
||||
@@ -8,9 +8,10 @@ define void @f1(ptr %ret, ptr %src, ptr %b) {
|
||||
; CHECK: lgr [[SRC:%r[0-9]+]], %r3
|
||||
; CHECK: ld [[FBL:%f[0-9]+]], 0(%r4)
|
||||
; CHECK: ld [[FBH:%f[0-9]+]], 8(%r4)
|
||||
; CHECK: ld [[FSL:%f[0-9]+]], 0(%r3)
|
||||
; CHECK: ld [[FSH:%f[0-9]+]], 8(%r3)
|
||||
; CHECK: lpq [[RIL:%r[0-9]+]], 0(%r3)
|
||||
; CHECK: lgr [[RET:%r[0-9]+]], %r2
|
||||
; CHECK: ldgr [[FSL:%f[0-9]+]], [[RIL]]
|
||||
; CHECK: ldgr [[FSH:%f[0-9]+]],
|
||||
; CHECK: [[L:\.L.+]]:
|
||||
; CHECK: std [[FBL]], 160(%r15)
|
||||
; CHECK: std [[FBH]], 168(%r15)
|
||||
|
||||
@@ -8,9 +8,10 @@ define void @f1(ptr %ret, ptr %src, ptr %b) {
|
||||
; CHECK: lgr [[SRC:%r[0-9]+]], %r3
|
||||
; CHECK: ld [[FBL:%f[0-9]+]], 0(%r4)
|
||||
; CHECK: ld [[FBH:%f[0-9]+]], 8(%r4)
|
||||
; CHECK: ld [[FSL:%f[0-9]+]], 0(%r3)
|
||||
; CHECK: ld [[FSH:%f[0-9]+]], 8(%r3)
|
||||
; CHECK: lpq [[RIL:%r[0-9]+]], 0(%r3)
|
||||
; CHECK: lgr [[RET:%r[0-9]+]], %r2
|
||||
; CHECK: ldgr [[FSL:%f[0-9]+]], [[RIL]]
|
||||
; CHECK: ldgr [[FSH:%f[0-9]+]],
|
||||
; CHECK: [[L:\.L.+]]:
|
||||
; CHECK: std [[FBL]], 160(%r15)
|
||||
; CHECK: std [[FBH]], 168(%r15)
|
||||
|
||||
@@ -6,8 +6,9 @@ define void @f1(ptr %ret, ptr %src, ptr %b) {
|
||||
; CHECK-LABEL: f1:
|
||||
; CHECK: [[FBL:%f[0-9]+]], 0(%r4)
|
||||
; CHECK: [[FBH:%f[0-9]+]], 8(%r4)
|
||||
; CHECK: [[FSL:%f[0-9]+]], 0(%r3)
|
||||
; CHECK: [[FSH:%f[0-9]+]], 8(%r3)
|
||||
; CHECK: lpq [[RIL:%r[0-9]+]], 0(%r3)
|
||||
; CHECK: ldgr [[FSL:%f[0-9]+]], [[RIL]]
|
||||
; CHECK: ldgr [[FSH:%f[0-9]+]],
|
||||
; CHECK: [[LABEL:\.L.+]]:
|
||||
; CHECK: lgdr [[RISH:%r[0-9]+]], [[FSH]]
|
||||
; CHECK: lgdr [[RISL:%r[0-9]+]], [[FSL]]
|
||||
|
||||
@@ -12,7 +12,8 @@ define i128 @atomicrmw_xchg(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_xchg:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v1, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v0, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v0, %r0, %r1
|
||||
; CHECK-NEXT: vlgvg %r1, %v1, 1
|
||||
; CHECK-NEXT: vlgvg %r0, %v1, 0
|
||||
; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
@@ -34,7 +35,8 @@ define i128 @atomicrmw_add(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_add:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: vaq %v2, %v1, %v0
|
||||
@@ -57,7 +59,8 @@ define i128 @atomicrmw_sub(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_sub:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: vsq %v2, %v1, %v0
|
||||
@@ -80,7 +83,8 @@ define i128 @atomicrmw_and(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_and:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: .LBB3_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: vn %v2, %v1, %v0
|
||||
@@ -103,7 +107,8 @@ define i128 @atomicrmw_nand(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_nand:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: vlgvg %r1, %v1, 1
|
||||
@@ -126,7 +131,8 @@ define i128 @atomicrmw_or(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_or:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: .LBB5_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: vo %v2, %v1, %v0
|
||||
@@ -149,7 +155,8 @@ define i128 @atomicrmw_xor(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_xor:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: vx %v2, %v1, %v0
|
||||
@@ -172,7 +179,8 @@ define i128 @atomicrmw_min(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_min:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: j .LBB7_2
|
||||
; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # in Loop: Header=BB7_2 Depth=1
|
||||
@@ -210,7 +218,8 @@ define i128 @atomicrmw_max(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_max:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: j .LBB8_2
|
||||
; CHECK-NEXT: .LBB8_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # in Loop: Header=BB8_2 Depth=1
|
||||
@@ -248,7 +257,8 @@ define i128 @atomicrmw_umin(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_umin:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: j .LBB9_2
|
||||
; CHECK-NEXT: .LBB9_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # in Loop: Header=BB9_2 Depth=1
|
||||
@@ -286,7 +296,8 @@ define i128 @atomicrmw_umax(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_umax:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v1, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v1, %r0, %r1
|
||||
; CHECK-NEXT: j .LBB10_2
|
||||
; CHECK-NEXT: .LBB10_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # in Loop: Header=BB10_2 Depth=1
|
||||
@@ -323,9 +334,10 @@ define i128 @atomicrmw_umax(ptr %src, i128 %b) {
|
||||
define i128 @atomicrmw_uinc_wrap(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_uinc_wrap:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: larl %r1, .LCPI11_0
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v2, 0(%r3), 4
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v2, %r0, %r1
|
||||
; CHECK-NEXT: larl %r1, .LCPI11_0
|
||||
; CHECK-NEXT: vl %v1, 0(%r1), 3
|
||||
; CHECK-NEXT: j .LBB11_2
|
||||
; CHECK-NEXT: .LBB11_1: # %atomicrmw.start
|
||||
@@ -363,11 +375,12 @@ define i128 @atomicrmw_uinc_wrap(ptr %src, i128 %b) {
|
||||
define i128 @atomicrmw_udec_wrap(ptr %src, i128 %b) {
|
||||
; CHECK-LABEL: atomicrmw_udec_wrap:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: larl %r1, .LCPI12_0
|
||||
; CHECK-NEXT: vl %v0, 0(%r4), 3
|
||||
; CHECK-NEXT: vl %v3, 0(%r3), 4
|
||||
; CHECK-NEXT: vl %v1, 0(%r1), 3
|
||||
; CHECK-NEXT: vgbm %v2, 65535
|
||||
; CHECK-NEXT: lpq %r0, 0(%r3)
|
||||
; CHECK-NEXT: vlvgp %v3, %r0, %r1
|
||||
; CHECK-NEXT: larl %r1, .LCPI12_0
|
||||
; CHECK-NEXT: vl %v1, 0(%r1), 3
|
||||
; CHECK-NEXT: j .LBB12_2
|
||||
; CHECK-NEXT: .LBB12_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # in Loop: Header=BB12_2 Depth=1
|
||||
|
||||
@@ -13,8 +13,9 @@ define void @f1(ptr align 16 %ret, ptr align 16 %src, ptr align 16 %b) {
|
||||
; CHECK-NEXT: .cfi_offset %r15, -40
|
||||
; CHECK-NEXT: lg %r1, 8(%r4)
|
||||
; CHECK-NEXT: lg %r0, 0(%r4)
|
||||
; CHECK-NEXT: lg %r4, 8(%r3)
|
||||
; CHECK-NEXT: lg %r5, 0(%r3)
|
||||
; CHECK-NEXT: lpq %r12, 0(%r3)
|
||||
; CHECK-NEXT: lgr %r4, %r13
|
||||
; CHECK-NEXT: lgr %r5, %r12
|
||||
; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: lgr %r12, %r5
|
||||
@@ -45,22 +46,23 @@ define void @f1_fpuse(ptr align 16 %ret, ptr align 16 %src, ptr align 16 %b) {
|
||||
; HARDFP-NEXT: .cfi_def_cfa_offset 336
|
||||
; HARDFP-NEXT: ld %f0, 0(%r4)
|
||||
; HARDFP-NEXT: ld %f2, 8(%r4)
|
||||
; HARDFP-NEXT: lg %r0, 8(%r3)
|
||||
; HARDFP-NEXT: lg %r1, 0(%r3)
|
||||
; HARDFP-NEXT: lpq %r12, 0(%r3)
|
||||
; HARDFP-NEXT: axbr %f0, %f0
|
||||
; HARDFP-NEXT: lgdr %r5, %f2
|
||||
; HARDFP-NEXT: lgdr %r4, %f0
|
||||
; HARDFP-NEXT: lgdr %r1, %f2
|
||||
; HARDFP-NEXT: lgdr %r0, %f0
|
||||
; HARDFP-NEXT: lgr %r4, %r13
|
||||
; HARDFP-NEXT: lgr %r5, %r12
|
||||
; HARDFP-NEXT: .LBB1_1: # %atomicrmw.start
|
||||
; HARDFP-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; HARDFP-NEXT: lgr %r12, %r1
|
||||
; HARDFP-NEXT: lgr %r13, %r0
|
||||
; HARDFP-NEXT: cdsg %r12, %r4, 0(%r3)
|
||||
; HARDFP-NEXT: lgr %r0, %r13
|
||||
; HARDFP-NEXT: lgr %r1, %r12
|
||||
; HARDFP-NEXT: lgr %r12, %r5
|
||||
; HARDFP-NEXT: lgr %r13, %r4
|
||||
; HARDFP-NEXT: cdsg %r12, %r0, 0(%r3)
|
||||
; HARDFP-NEXT: lgr %r4, %r13
|
||||
; HARDFP-NEXT: lgr %r5, %r12
|
||||
; HARDFP-NEXT: jl .LBB1_1
|
||||
; HARDFP-NEXT: # %bb.2: # %atomicrmw.end
|
||||
; HARDFP-NEXT: stg %r1, 160(%r15)
|
||||
; HARDFP-NEXT: stg %r0, 168(%r15)
|
||||
; HARDFP-NEXT: stg %r5, 160(%r15)
|
||||
; HARDFP-NEXT: stg %r4, 168(%r15)
|
||||
; HARDFP-NEXT: ld %f0, 160(%r15)
|
||||
; HARDFP-NEXT: ld %f2, 168(%r15)
|
||||
; HARDFP-NEXT: axbr %f0, %f0
|
||||
@@ -92,8 +94,9 @@ define void @f1_fpuse(ptr align 16 %ret, ptr align 16 %src, ptr align 16 %b) {
|
||||
; SOFTFP-NEXT: brasl %r14, __addtf3@PLT
|
||||
; SOFTFP-NEXT: lg %r3, 248(%r15)
|
||||
; SOFTFP-NEXT: lg %r2, 240(%r15)
|
||||
; SOFTFP-NEXT: lg %r0, 8(%r12)
|
||||
; SOFTFP-NEXT: lg %r1, 0(%r12)
|
||||
; SOFTFP-NEXT: lpq %r4, 0(%r12)
|
||||
; SOFTFP-NEXT: lgr %r0, %r5
|
||||
; SOFTFP-NEXT: lgr %r1, %r4
|
||||
; SOFTFP-NEXT: .LBB1_1: # %atomicrmw.start
|
||||
; SOFTFP-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; SOFTFP-NEXT: lgr %r4, %r1
|
||||
|
||||
@@ -17,7 +17,7 @@ define signext i8 @test_atomic_fetch_add_1() {
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s0, c@hi(, %s0)
|
||||
; CHECK-NEXT: and %s0, -4, %s0
|
||||
; CHECK-NEXT: ldl.sx %s2, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: lea %s1, -256
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
|
||||
@@ -48,7 +48,7 @@ define signext i16 @test_atomic_fetch_sub_2() {
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s0, s@hi(, %s0)
|
||||
; CHECK-NEXT: and %s0, -4, %s0
|
||||
; CHECK-NEXT: ldl.sx %s2, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: lea %s1, -65536
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
|
||||
@@ -78,7 +78,7 @@ define signext i32 @test_atomic_fetch_and_4() {
|
||||
; CHECK-NEXT: lea %s0, i@lo
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s0, i@hi(, %s0)
|
||||
; CHECK-NEXT: ldl.sx %s1, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s1, (, %s0)
|
||||
; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: or %s2, 0, %s1
|
||||
@@ -125,7 +125,7 @@ define signext i8 @test_atomic_fetch_xor_1() {
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s0, c@hi(, %s0)
|
||||
; CHECK-NEXT: and %s1, -4, %s0
|
||||
; CHECK-NEXT: ldl.sx %s0, (, %s1)
|
||||
; CHECK-NEXT: ldl.zx %s0, (, %s1)
|
||||
; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: or %s2, 0, %s0
|
||||
@@ -151,7 +151,7 @@ define signext i16 @test_atomic_fetch_nand_2() {
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s0, s@hi(, %s0)
|
||||
; CHECK-NEXT: and %s0, -4, %s0
|
||||
; CHECK-NEXT: ldl.sx %s2, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: lea %s1, 65534
|
||||
; CHECK-NEXT: lea %s3, -65536
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
@@ -183,7 +183,7 @@ define signext i32 @test_atomic_fetch_max_4() {
|
||||
; CHECK-NEXT: lea %s0, i@lo
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s1, i@hi(, %s0)
|
||||
; CHECK-NEXT: ldl.sx %s0, (, %s1)
|
||||
; CHECK-NEXT: ldl.zx %s0, (, %s1)
|
||||
; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: or %s2, 0, %s0
|
||||
@@ -207,7 +207,7 @@ define signext i32 @test_atomic_fetch_min_4() {
|
||||
; CHECK-NEXT: lea %s0, i@lo
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s1, i@hi(, %s0)
|
||||
; CHECK-NEXT: ldl.sx %s0, (, %s1)
|
||||
; CHECK-NEXT: ldl.zx %s0, (, %s1)
|
||||
; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: or %s2, 0, %s0
|
||||
@@ -231,7 +231,7 @@ define signext i32 @test_atomic_fetch_umax_4() {
|
||||
; CHECK-NEXT: lea %s0, i@lo
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s0, i@hi(, %s0)
|
||||
; CHECK-NEXT: ldl.sx %s1, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s1, (, %s0)
|
||||
; CHECK-NEXT: .LBB8_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: or %s2, 0, %s1
|
||||
@@ -257,7 +257,7 @@ define signext i32 @test_atomic_fetch_umin_4() {
|
||||
; CHECK-NEXT: lea %s0, i@lo
|
||||
; CHECK-NEXT: and %s0, %s0, (32)0
|
||||
; CHECK-NEXT: lea.sl %s0, i@hi(, %s0)
|
||||
; CHECK-NEXT: ldl.sx %s1, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s1, (, %s0)
|
||||
; CHECK-NEXT: .LBB9_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
; CHECK-NEXT: or %s2, 0, %s1
|
||||
|
||||
@@ -85,7 +85,7 @@ define zeroext i1 @_Z26atomic_cmp_swap_relaxed_i1RNSt3__16atomicIbEERbb(ptr noca
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (56)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -130,7 +130,7 @@ define signext i8 @_Z26atomic_cmp_swap_relaxed_i8RNSt3__16atomicIcEERcc(ptr noca
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s5, (56)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s6, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s6, (, %s3)
|
||||
; CHECK-NEXT: and %s2, %s2, (56)0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -174,7 +174,7 @@ define zeroext i8 @_Z26atomic_cmp_swap_relaxed_u8RNSt3__16atomicIhEERhh(ptr noca
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (56)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -219,7 +219,7 @@ define signext i16 @_Z27atomic_cmp_swap_relaxed_i16RNSt3__16atomicIsEERss(ptr no
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s5, (48)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s6, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s6, (, %s3)
|
||||
; CHECK-NEXT: and %s2, %s2, (48)0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -263,7 +263,7 @@ define zeroext i16 @_Z27atomic_cmp_swap_relaxed_u16RNSt3__16atomicItEERtt(ptr no
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (48)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -525,7 +525,7 @@ define zeroext i1 @_Z26atomic_cmp_swap_acquire_i1RNSt3__16atomicIbEERbb(ptr noca
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (56)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -571,7 +571,7 @@ define signext i8 @_Z26atomic_cmp_swap_acquire_i8RNSt3__16atomicIcEERcc(ptr noca
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s5, (56)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s6, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s6, (, %s3)
|
||||
; CHECK-NEXT: and %s2, %s2, (56)0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -616,7 +616,7 @@ define zeroext i8 @_Z26atomic_cmp_swap_acquire_u8RNSt3__16atomicIhEERhh(ptr noca
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (56)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -662,7 +662,7 @@ define signext i16 @_Z27atomic_cmp_swap_acquire_i16RNSt3__16atomicIsEERss(ptr no
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s5, (48)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s6, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s6, (, %s3)
|
||||
; CHECK-NEXT: and %s2, %s2, (48)0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -707,7 +707,7 @@ define zeroext i16 @_Z27atomic_cmp_swap_acquire_u16RNSt3__16atomicItEERtt(ptr no
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (48)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -975,7 +975,7 @@ define zeroext i1 @_Z26atomic_cmp_swap_seq_cst_i1RNSt3__16atomicIbEERbb(ptr noca
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (56)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -1022,7 +1022,7 @@ define signext i8 @_Z26atomic_cmp_swap_seq_cst_i8RNSt3__16atomicIcEERcc(ptr noca
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s5, (56)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s6, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s6, (, %s3)
|
||||
; CHECK-NEXT: and %s2, %s2, (56)0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -1068,7 +1068,7 @@ define zeroext i8 @_Z26atomic_cmp_swap_seq_cst_u8RNSt3__16atomicIhEERhh(ptr noca
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (56)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -1115,7 +1115,7 @@ define signext i16 @_Z27atomic_cmp_swap_seq_cst_i16RNSt3__16atomicIsEERss(ptr no
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s5, (48)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s6, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s6, (, %s3)
|
||||
; CHECK-NEXT: and %s2, %s2, (48)0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -1161,7 +1161,7 @@ define zeroext i16 @_Z27atomic_cmp_swap_seq_cst_u16RNSt3__16atomicItEERtt(ptr no
|
||||
; CHECK-NEXT: and %s3, -4, %s0
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s3)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s3)
|
||||
; CHECK-NEXT: sla.w.sx %s6, (48)0, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s2, %s2, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, %s0
|
||||
@@ -2004,28 +2004,28 @@ bb:
|
||||
define zeroext i1 @_Z29atomic_cmp_swap_relaxed_gv_i1Rbb(ptr nocapture nonnull align 1 dereferenceable(1) %arg, i1 zeroext %arg1) {
|
||||
; CHECK-LABEL: _Z29atomic_cmp_swap_relaxed_gv_i1Rbb:
|
||||
; CHECK: # %bb.0: # %bb
|
||||
; CHECK-NEXT: and %s2, %s1, (32)0
|
||||
; CHECK-NEXT: lea %s1, gv_i1@lo
|
||||
; CHECK-NEXT: ld1b.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: lea %s3, gv_i1@lo
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
; CHECK-NEXT: lea.sl %s3, gv_i1@hi(, %s3)
|
||||
; CHECK-NEXT: and %s3, -4, %s3
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s3)
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: lea.sl %s1, gv_i1@hi(, %s1)
|
||||
; CHECK-NEXT: and %s1, -4, %s1
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s1)
|
||||
; CHECK-NEXT: ld1b.zx %s3, (, %s0)
|
||||
; CHECK-NEXT: lea %s5, -256
|
||||
; CHECK-NEXT: and %s5, %s5, (32)0
|
||||
; CHECK-NEXT: and %s4, %s4, %s5
|
||||
; CHECK-NEXT: and %s4, %s4, (32)0
|
||||
; CHECK-NEXT: or %s1, %s4, %s1
|
||||
; CHECK-NEXT: or %s2, %s4, %s2
|
||||
; CHECK-NEXT: or %s3, %s4, %s3
|
||||
; CHECK-NEXT: cas.w %s2, (%s1), %s3
|
||||
; CHECK-NEXT: cmps.w.sx %s3, %s2, %s3
|
||||
; CHECK-NEXT: or %s1, 0, (0)1
|
||||
; CHECK-NEXT: cmov.w.eq %s1, (63)0, %s3
|
||||
; CHECK-NEXT: brne.w 0, %s1, .LBB44_2
|
||||
; CHECK-NEXT: cas.w %s1, (%s3), %s2
|
||||
; CHECK-NEXT: cmps.w.sx %s3, %s1, %s2
|
||||
; CHECK-NEXT: or %s2, 0, (0)1
|
||||
; CHECK-NEXT: cmov.w.eq %s2, (63)0, %s3
|
||||
; CHECK-NEXT: brne.w 0, %s2, .LBB44_2
|
||||
; CHECK-NEXT: # %bb.1: # %bb5
|
||||
; CHECK-NEXT: st1b %s2, (, %s0)
|
||||
; CHECK-NEXT: st1b %s1, (, %s0)
|
||||
; CHECK-NEXT: .LBB44_2: # %bb7
|
||||
; CHECK-NEXT: adds.w.zx %s0, %s1, (0)1
|
||||
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
bb:
|
||||
%i = zext i1 %arg1 to i8
|
||||
@@ -2091,28 +2091,28 @@ bb6: ; preds = %bb4, %bb
|
||||
define zeroext i8 @_Z29atomic_cmp_swap_relaxed_gv_u8Rhh(ptr nocapture nonnull align 1 dereferenceable(1) %arg, i8 zeroext %arg1) {
|
||||
; CHECK-LABEL: _Z29atomic_cmp_swap_relaxed_gv_u8Rhh:
|
||||
; CHECK: # %bb.0: # %bb
|
||||
; CHECK-NEXT: and %s2, %s1, (32)0
|
||||
; CHECK-NEXT: lea %s1, gv_u8@lo
|
||||
; CHECK-NEXT: ld1b.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: lea %s3, gv_u8@lo
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
; CHECK-NEXT: lea.sl %s3, gv_u8@hi(, %s3)
|
||||
; CHECK-NEXT: and %s3, -4, %s3
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s3)
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: lea.sl %s1, gv_u8@hi(, %s1)
|
||||
; CHECK-NEXT: and %s1, -4, %s1
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s1)
|
||||
; CHECK-NEXT: ld1b.zx %s3, (, %s0)
|
||||
; CHECK-NEXT: lea %s5, -256
|
||||
; CHECK-NEXT: and %s5, %s5, (32)0
|
||||
; CHECK-NEXT: and %s4, %s4, %s5
|
||||
; CHECK-NEXT: and %s4, %s4, (32)0
|
||||
; CHECK-NEXT: or %s1, %s4, %s1
|
||||
; CHECK-NEXT: or %s2, %s4, %s2
|
||||
; CHECK-NEXT: or %s3, %s4, %s3
|
||||
; CHECK-NEXT: cas.w %s2, (%s1), %s3
|
||||
; CHECK-NEXT: cmps.w.sx %s3, %s2, %s3
|
||||
; CHECK-NEXT: or %s1, 0, (0)1
|
||||
; CHECK-NEXT: cmov.w.eq %s1, (63)0, %s3
|
||||
; CHECK-NEXT: brne.w 0, %s1, .LBB46_2
|
||||
; CHECK-NEXT: cas.w %s1, (%s3), %s2
|
||||
; CHECK-NEXT: cmps.w.sx %s3, %s1, %s2
|
||||
; CHECK-NEXT: or %s2, 0, (0)1
|
||||
; CHECK-NEXT: cmov.w.eq %s2, (63)0, %s3
|
||||
; CHECK-NEXT: brne.w 0, %s2, .LBB46_2
|
||||
; CHECK-NEXT: # %bb.1: # %bb4
|
||||
; CHECK-NEXT: st1b %s2, (, %s0)
|
||||
; CHECK-NEXT: st1b %s1, (, %s0)
|
||||
; CHECK-NEXT: .LBB46_2: # %bb6
|
||||
; CHECK-NEXT: adds.w.zx %s0, %s1, (0)1
|
||||
; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
|
||||
; CHECK-NEXT: b.l.t (, %s10)
|
||||
bb:
|
||||
%i = load i8, ptr %arg, align 1
|
||||
@@ -2134,19 +2134,22 @@ bb6: ; preds = %bb4, %bb
|
||||
define signext i16 @_Z30atomic_cmp_swap_relaxed_gv_i16Rss(ptr nocapture nonnull align 2 dereferenceable(2) %arg, i16 signext %arg1) {
|
||||
; CHECK-LABEL: _Z30atomic_cmp_swap_relaxed_gv_i16Rss:
|
||||
; CHECK: # %bb.0: # %bb
|
||||
; CHECK-NEXT: lea %s2, gv_i16@lo
|
||||
; CHECK-NEXT: and %s2, %s2, (32)0
|
||||
; CHECK-NEXT: lea.sl %s2, gv_i16@hi(, %s2)
|
||||
; CHECK-NEXT: and %s2, -4, %s2
|
||||
; CHECK-NEXT: ld2b.zx %s4, 2(, %s2)
|
||||
; CHECK-NEXT: ld2b.zx %s3, (, %s0)
|
||||
; CHECK-NEXT: ld2b.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: lea %s3, gv_i16@lo
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
; CHECK-NEXT: lea.sl %s3, gv_i16@hi(, %s3)
|
||||
; CHECK-NEXT: and %s3, -4, %s3
|
||||
; CHECK-NEXT: and %s1, %s1, (48)0
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s3)
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, 16
|
||||
; CHECK-NEXT: lea %s5, -65536
|
||||
; CHECK-NEXT: and %s5, %s5, (32)0
|
||||
; CHECK-NEXT: and %s4, %s4, %s5
|
||||
; CHECK-NEXT: and %s4, %s4, (32)0
|
||||
; CHECK-NEXT: or %s1, %s4, %s1
|
||||
; CHECK-NEXT: or %s3, %s4, %s3
|
||||
; CHECK-NEXT: cas.w %s1, (%s2), %s3
|
||||
; CHECK-NEXT: cmps.w.sx %s3, %s1, %s3
|
||||
; CHECK-NEXT: or %s2, %s4, %s2
|
||||
; CHECK-NEXT: cas.w %s1, (%s3), %s2
|
||||
; CHECK-NEXT: cmps.w.sx %s3, %s1, %s2
|
||||
; CHECK-NEXT: or %s2, 0, (0)1
|
||||
; CHECK-NEXT: cmov.w.eq %s2, (63)0, %s3
|
||||
; CHECK-NEXT: brne.w 0, %s2, .LBB47_2
|
||||
@@ -2175,18 +2178,21 @@ bb6: ; preds = %bb4, %bb
|
||||
define zeroext i16 @_Z30atomic_cmp_swap_relaxed_gv_u16Rtt(ptr nocapture nonnull align 2 dereferenceable(2) %arg, i16 zeroext %arg1) {
|
||||
; CHECK-LABEL: _Z30atomic_cmp_swap_relaxed_gv_u16Rtt:
|
||||
; CHECK: # %bb.0: # %bb
|
||||
; CHECK-NEXT: lea %s2, gv_u16@lo
|
||||
; CHECK-NEXT: and %s2, %s2, (32)0
|
||||
; CHECK-NEXT: lea.sl %s2, gv_u16@hi(, %s2)
|
||||
; CHECK-NEXT: and %s2, -4, %s2
|
||||
; CHECK-NEXT: ld2b.zx %s4, 2(, %s2)
|
||||
; CHECK-NEXT: ld2b.zx %s3, (, %s0)
|
||||
; CHECK-NEXT: ld2b.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: lea %s3, gv_u16@lo
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
; CHECK-NEXT: lea.sl %s3, gv_u16@hi(, %s3)
|
||||
; CHECK-NEXT: and %s3, -4, %s3
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s3)
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: sla.w.sx %s4, %s4, 16
|
||||
; CHECK-NEXT: lea %s5, -65536
|
||||
; CHECK-NEXT: and %s5, %s5, (32)0
|
||||
; CHECK-NEXT: and %s4, %s4, %s5
|
||||
; CHECK-NEXT: and %s4, %s4, (32)0
|
||||
; CHECK-NEXT: or %s1, %s4, %s1
|
||||
; CHECK-NEXT: or %s3, %s4, %s3
|
||||
; CHECK-NEXT: cas.w %s1, (%s2), %s3
|
||||
; CHECK-NEXT: cmps.w.sx %s3, %s1, %s3
|
||||
; CHECK-NEXT: or %s2, %s4, %s2
|
||||
; CHECK-NEXT: cas.w %s1, (%s3), %s2
|
||||
; CHECK-NEXT: cmps.w.sx %s3, %s1, %s2
|
||||
; CHECK-NEXT: or %s2, 0, (0)1
|
||||
; CHECK-NEXT: cmov.w.eq %s2, (63)0, %s3
|
||||
; CHECK-NEXT: brne.w 0, %s2, .LBB48_2
|
||||
|
||||
@@ -10,7 +10,7 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s3, (56)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s2)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s2)
|
||||
; CHECK-NEXT: xor %s3, -1, %s3
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
; CHECK-NEXT: and %s4, %s1, (56)0
|
||||
@@ -47,7 +47,7 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s3, (48)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s2)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s2)
|
||||
; CHECK-NEXT: xor %s3, -1, %s3
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
; CHECK-NEXT: and %s4, %s1, (48)0
|
||||
@@ -79,7 +79,7 @@ define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
|
||||
; CHECK-LABEL: atomicrmw_usub_cond_i32:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: fencem 3
|
||||
; CHECK-NEXT: ldl.sx %s2, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -128,7 +128,7 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s2, (56)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s4, (, %s1)
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s1)
|
||||
; CHECK-NEXT: xor %s2, -1, %s2
|
||||
; CHECK-NEXT: and %s2, %s2, (32)0
|
||||
; CHECK-NEXT: and %s3, %s3, (56)0
|
||||
@@ -164,7 +164,7 @@ define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s2, (48)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s4, (, %s1)
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s1)
|
||||
; CHECK-NEXT: xor %s2, -1, %s2
|
||||
; CHECK-NEXT: and %s2, %s2, (32)0
|
||||
; CHECK-NEXT: and %s3, %s3, (48)0
|
||||
@@ -195,7 +195,7 @@ define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
|
||||
; CHECK-LABEL: atomicrmw_usub_sat_i32:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: fencem 3
|
||||
; CHECK-NEXT: ldl.sx %s2, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
|
||||
@@ -10,7 +10,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s2, (56)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s4, (, %s1)
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s1)
|
||||
; CHECK-NEXT: xor %s2, -1, %s2
|
||||
; CHECK-NEXT: and %s2, %s2, (32)0
|
||||
; CHECK-NEXT: and %s3, %s3, (56)0
|
||||
@@ -47,7 +47,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s2, (48)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s4, (, %s1)
|
||||
; CHECK-NEXT: ldl.zx %s4, (, %s1)
|
||||
; CHECK-NEXT: xor %s2, -1, %s2
|
||||
; CHECK-NEXT: and %s2, %s2, (32)0
|
||||
; CHECK-NEXT: and %s3, %s3, (48)0
|
||||
@@ -79,7 +79,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
|
||||
; CHECK-LABEL: atomicrmw_uinc_wrap_i32:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: fencem 3
|
||||
; CHECK-NEXT: ldl.sx %s2, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
@@ -127,7 +127,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s3, (56)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s2)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s2)
|
||||
; CHECK-NEXT: xor %s3, -1, %s3
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
; CHECK-NEXT: and %s4, %s1, (56)0
|
||||
@@ -165,7 +165,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
|
||||
; CHECK-NEXT: and %s0, 3, %s0
|
||||
; CHECK-NEXT: sla.w.sx %s0, %s0, 3
|
||||
; CHECK-NEXT: sla.w.sx %s3, (48)0, %s0
|
||||
; CHECK-NEXT: ldl.sx %s5, (, %s2)
|
||||
; CHECK-NEXT: ldl.zx %s5, (, %s2)
|
||||
; CHECK-NEXT: xor %s3, -1, %s3
|
||||
; CHECK-NEXT: and %s3, %s3, (32)0
|
||||
; CHECK-NEXT: and %s4, %s1, (48)0
|
||||
@@ -198,7 +198,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
|
||||
; CHECK-LABEL: atomicrmw_udec_wrap_i32:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: fencem 3
|
||||
; CHECK-NEXT: ldl.sx %s2, (, %s0)
|
||||
; CHECK-NEXT: ldl.zx %s2, (, %s0)
|
||||
; CHECK-NEXT: and %s1, %s1, (32)0
|
||||
; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
|
||||
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
|
||||
|
||||
@@ -4,7 +4,8 @@
|
||||
define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
|
||||
; CHECK-NEXT: call void @llvm.arm.dmb(i32 11)
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP6]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -27,7 +28,8 @@ define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
|
||||
define float @test_atomicrmw_fsub_f32(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
|
||||
; CHECK-NEXT: call void @llvm.arm.dmb(i32 11)
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP6]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
|
||||
|
||||
@@ -3,7 +3,8 @@
|
||||
|
||||
define float @atomicrmw_fadd_float(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @atomicrmw_fadd_float(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP6]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -24,7 +25,8 @@ define float @atomicrmw_fadd_float(ptr %ptr, float %value) {
|
||||
|
||||
define float @atomicrmw_fsub_float(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @atomicrmw_fsub_float(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP6]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -45,7 +47,8 @@ define float @atomicrmw_fsub_float(ptr %ptr, float %value) {
|
||||
|
||||
define float @atomicrmw_fmin_float(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @atomicrmw_fmin_float(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP7]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -66,7 +69,8 @@ define float @atomicrmw_fmin_float(ptr %ptr, float %value) {
|
||||
|
||||
define float @atomicrmw_fmax_float(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @atomicrmw_fmax_float(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP7]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -87,7 +91,8 @@ define float @atomicrmw_fmax_float(ptr %ptr, float %value) {
|
||||
|
||||
define double @atomicrmw_fadd_double(ptr %ptr, double %value) {
|
||||
; CHECK-LABEL: @atomicrmw_fadd_double(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i64, ptr [[PTR:%.*]] monotonic, align 8
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[TMP6]] to double
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -108,7 +113,8 @@ define double @atomicrmw_fadd_double(ptr %ptr, double %value) {
|
||||
|
||||
define double @atomicrmw_fsub_double(ptr %ptr, double %value) {
|
||||
; CHECK-LABEL: @atomicrmw_fsub_double(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i64, ptr [[PTR:%.*]] monotonic, align 8
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[TMP6]] to double
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -129,7 +135,8 @@ define double @atomicrmw_fsub_double(ptr %ptr, double %value) {
|
||||
|
||||
define double @atomicrmw_fmin_double(ptr %ptr, double %value) {
|
||||
; CHECK-LABEL: @atomicrmw_fmin_double(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i64, ptr [[PTR:%.*]] monotonic, align 8
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[TMP7]] to double
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -150,7 +157,8 @@ define double @atomicrmw_fmin_double(ptr %ptr, double %value) {
|
||||
|
||||
define double @atomicrmw_fmax_double(ptr %ptr, double %value) {
|
||||
; CHECK-LABEL: @atomicrmw_fmax_double(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i64, ptr [[PTR:%.*]] monotonic, align 8
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[TMP7]] to double
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
|
||||
|
||||
@@ -4,7 +4,8 @@
|
||||
define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
|
||||
; CHECK-NEXT: fence seq_cst
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP6]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -27,7 +28,8 @@ define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
|
||||
define float @test_atomicrmw_fsub_f32(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
|
||||
; CHECK-NEXT: fence seq_cst
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP6]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
|
||||
|
||||
@@ -4,54 +4,46 @@
|
||||
define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
|
||||
; CHECK-NEXT: call void @llvm.ppc.sync()
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP6]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: %loaded = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], %cmpxchg.end ]
|
||||
; CHECK-NEXT: %new = fadd float %loaded, %value
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float %new to i32
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float %loaded to i32
|
||||
; CHECK-NEXT: br label %cmpxchg.start
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.start: ; preds = %cmpxchg.trystore, %atomicrmw.start
|
||||
; CHECK-NEXT: %larx = call i32 @llvm.ppc.lwarx(ptr %ptr)
|
||||
; CHECK-NEXT: %should_store = icmp eq i32 %larx, [[TMP3]]
|
||||
; CHECK-NEXT: br i1 %should_store, label %cmpxchg.fencedstore, label %cmpxchg.nostore
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.fencedstore: ; preds = %cmpxchg.start
|
||||
; CHECK-NEXT: br label %cmpxchg.trystore
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.trystore: ; preds = %cmpxchg.fencedstore
|
||||
; CHECK-NEXT: %loaded.trystore = phi i32 [ %larx, %cmpxchg.fencedstore ]
|
||||
; CHECK-NEXT: %stcx = call i32 @llvm.ppc.stwcx(ptr %ptr, i32 [[TMP2]])
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 %stcx, 1
|
||||
; CHECK-NEXT: %success1 = icmp eq i32 [[TMP4]], 0
|
||||
; CHECK-NEXT: br i1 %success1, label %cmpxchg.success, label %cmpxchg.start
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.releasedload: ; No predecessors!
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[CMPXCHG_END:%.*]] ]
|
||||
; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[NEW]] to i32
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[LOADED]] to i32
|
||||
; CHECK-NEXT: br label [[CMPXCHG_START:%.*]]
|
||||
; CHECK: cmpxchg.start:
|
||||
; CHECK-NEXT: [[LARX:%.*]] = call i32 @llvm.ppc.lwarx(ptr [[PTR]])
|
||||
; CHECK-NEXT: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LARX]], [[TMP3]]
|
||||
; CHECK-NEXT: br i1 [[SHOULD_STORE]], label [[CMPXCHG_FENCEDSTORE:%.*]], label [[CMPXCHG_NOSTORE:%.*]], !prof [[PROF0:![0-9]+]]
|
||||
; CHECK: cmpxchg.fencedstore:
|
||||
; CHECK-NEXT: br label [[CMPXCHG_TRYSTORE:%.*]]
|
||||
; CHECK: cmpxchg.trystore:
|
||||
; CHECK-NEXT: [[LOADED_TRYSTORE:%.*]] = phi i32 [ [[LARX]], [[CMPXCHG_FENCEDSTORE]] ]
|
||||
; CHECK-NEXT: [[STCX:%.*]] = call i32 @llvm.ppc.stwcx(ptr [[PTR]], i32 [[TMP2]])
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[STCX]], 1
|
||||
; CHECK-NEXT: [[SUCCESS1:%.*]] = icmp eq i32 [[TMP4]], 0
|
||||
; CHECK-NEXT: br i1 [[SUCCESS1]], label [[CMPXCHG_SUCCESS:%.*]], label [[CMPXCHG_START]], !prof [[PROF0]]
|
||||
; CHECK: cmpxchg.releasedload:
|
||||
; CHECK-NEXT: unreachable
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.success: ; preds = %cmpxchg.trystore
|
||||
; CHECK-NEXT: br label %cmpxchg.end
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.nostore: ; preds = %cmpxchg.start
|
||||
; CHECK-NEXT: %loaded.nostore = phi i32 [ %larx, %cmpxchg.start ]
|
||||
; CHECK-NEXT: br label %cmpxchg.failure
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.failure: ; preds = %cmpxchg.nostore
|
||||
; CHECK-NEXT: %loaded.failure = phi i32 [ %loaded.nostore, %cmpxchg.nostore ]
|
||||
; CHECK-NEXT: br label %cmpxchg.end
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.end: ; preds = %cmpxchg.failure, %cmpxchg.success
|
||||
; CHECK-NEXT: %loaded.exit = phi i32 [ %loaded.trystore, %cmpxchg.success ], [ %loaded.failure, %cmpxchg.failure ]
|
||||
; CHECK-NEXT: %success2 = phi i1 [ true, %cmpxchg.success ], [ false, %cmpxchg.failure ]
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 %loaded.exit to float
|
||||
; CHECK-NEXT: br i1 %success2, label %atomicrmw.end, label %atomicrmw.start
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: atomicrmw.end: ; preds = %cmpxchg.end
|
||||
; CHECK: cmpxchg.success:
|
||||
; CHECK-NEXT: br label [[CMPXCHG_END]]
|
||||
; CHECK: cmpxchg.nostore:
|
||||
; CHECK-NEXT: [[LOADED_NOSTORE:%.*]] = phi i32 [ [[LARX]], [[CMPXCHG_START]] ]
|
||||
; CHECK-NEXT: br label [[CMPXCHG_FAILURE:%.*]]
|
||||
; CHECK: cmpxchg.failure:
|
||||
; CHECK-NEXT: [[LOADED_FAILURE:%.*]] = phi i32 [ [[LOADED_NOSTORE]], [[CMPXCHG_NOSTORE]] ]
|
||||
; CHECK-NEXT: br label [[CMPXCHG_END]]
|
||||
; CHECK: cmpxchg.end:
|
||||
; CHECK-NEXT: [[LOADED_EXIT:%.*]] = phi i32 [ [[LOADED_TRYSTORE]], [[CMPXCHG_SUCCESS]] ], [ [[LOADED_FAILURE]], [[CMPXCHG_FAILURE]] ]
|
||||
; CHECK-NEXT: [[SUCCESS2:%.*]] = phi i1 [ true, [[CMPXCHG_SUCCESS]] ], [ false, [[CMPXCHG_FAILURE]] ]
|
||||
; CHECK-NEXT: [[TMP5]] = bitcast i32 [[LOADED_EXIT]] to float
|
||||
; CHECK-NEXT: br i1 [[SUCCESS2]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
|
||||
; CHECK: atomicrmw.end:
|
||||
; CHECK-NEXT: call void @llvm.ppc.lwsync()
|
||||
; CHECK-NEXT: ret float [[TMP5]]
|
||||
; CHECK-NEXT: }
|
||||
;
|
||||
%res = atomicrmw fadd ptr %ptr, float %value seq_cst
|
||||
ret float %res
|
||||
}
|
||||
@@ -59,55 +51,46 @@ define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
|
||||
define float @test_atomicrmw_fsub_f32(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
|
||||
; CHECK-NEXT: call void @llvm.ppc.sync()
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: br label %atomicrmw.start
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: atomicrmw.start:
|
||||
; CHECK-NEXT: %loaded = phi float [ [[TMP1]], %0 ], [ [[TMP5:%.*]], %cmpxchg.end ]
|
||||
; CHECK-NEXT: %new = fsub float %loaded, %value
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float %new to i32
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float %loaded to i32
|
||||
; CHECK-NEXT: br label %cmpxchg.start
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.start:
|
||||
; CHECK-NEXT: %larx = call i32 @llvm.ppc.lwarx(ptr %ptr)
|
||||
; CHECK-NEXT: %should_store = icmp eq i32 %larx, [[TMP3]]
|
||||
; CHECK-NEXT: br i1 %should_store, label %cmpxchg.fencedstore, label %cmpxchg.nostore
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.fencedstore: ; preds = %cmpxchg.start
|
||||
; CHECK-NEXT: br label %cmpxchg.trystore
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.trystore: ; preds = %cmpxchg.fencedstore
|
||||
; CHECK-NEXT: %loaded.trystore = phi i32 [ %larx, %cmpxchg.fencedstore ]
|
||||
; CHECK-NEXT: %stcx = call i32 @llvm.ppc.stwcx(ptr %ptr, i32 %2)
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 %stcx, 1
|
||||
; CHECK-NEXT: %success1 = icmp eq i32 [[TMP4]], 0
|
||||
; CHECK-NEXT: br i1 %success1, label %cmpxchg.success, label %cmpxchg.start
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.releasedload: ; No predecessors!
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[TMP6]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[CMPXCHG_END:%.*]] ]
|
||||
; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[NEW]] to i32
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[LOADED]] to i32
|
||||
; CHECK-NEXT: br label [[CMPXCHG_START:%.*]]
|
||||
; CHECK: cmpxchg.start:
|
||||
; CHECK-NEXT: [[LARX:%.*]] = call i32 @llvm.ppc.lwarx(ptr [[PTR]])
|
||||
; CHECK-NEXT: [[SHOULD_STORE:%.*]] = icmp eq i32 [[LARX]], [[TMP3]]
|
||||
; CHECK-NEXT: br i1 [[SHOULD_STORE]], label [[CMPXCHG_FENCEDSTORE:%.*]], label [[CMPXCHG_NOSTORE:%.*]], !prof [[PROF0]]
|
||||
; CHECK: cmpxchg.fencedstore:
|
||||
; CHECK-NEXT: br label [[CMPXCHG_TRYSTORE:%.*]]
|
||||
; CHECK: cmpxchg.trystore:
|
||||
; CHECK-NEXT: [[LOADED_TRYSTORE:%.*]] = phi i32 [ [[LARX]], [[CMPXCHG_FENCEDSTORE]] ]
|
||||
; CHECK-NEXT: [[STCX:%.*]] = call i32 @llvm.ppc.stwcx(ptr [[PTR]], i32 [[TMP2]])
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[STCX]], 1
|
||||
; CHECK-NEXT: [[SUCCESS1:%.*]] = icmp eq i32 [[TMP4]], 0
|
||||
; CHECK-NEXT: br i1 [[SUCCESS1]], label [[CMPXCHG_SUCCESS:%.*]], label [[CMPXCHG_START]], !prof [[PROF0]]
|
||||
; CHECK: cmpxchg.releasedload:
|
||||
; CHECK-NEXT: unreachable
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.success: ; preds = %cmpxchg.trystore
|
||||
; CHECK-NEXT: br label %cmpxchg.end
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.nostore: ; preds = %cmpxchg.start
|
||||
; CHECK-NEXT: %loaded.nostore = phi i32 [ %larx, %cmpxchg.start ]
|
||||
; CHECK-NEXT: br label %cmpxchg.failure
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.failure: ; preds = %cmpxchg.nostore
|
||||
; CHECK-NEXT: %loaded.failure = phi i32 [ %loaded.nostore, %cmpxchg.nostore ]
|
||||
; CHECK-NEXT: br label %cmpxchg.end
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: cmpxchg.end: ; preds = %cmpxchg.failure, %cmpxchg.success
|
||||
; CHECK-NEXT: %loaded.exit = phi i32 [ %loaded.trystore, %cmpxchg.success ], [ %loaded.failure, %cmpxchg.failure ]
|
||||
; CHECK-NEXT: %success2 = phi i1 [ true, %cmpxchg.success ], [ false, %cmpxchg.failure ]
|
||||
; CHECK-NEXT: [[TMP5]] = bitcast i32 %loaded.exit to float
|
||||
; CHECK-NEXT: br i1 %success2, label %atomicrmw.end, label %atomicrmw.start
|
||||
; CHECK-EMPTY:
|
||||
; CHECK-NEXT: atomicrmw.end: ; preds = %cmpxchg.end
|
||||
; CHECK: cmpxchg.success:
|
||||
; CHECK-NEXT: br label [[CMPXCHG_END]]
|
||||
; CHECK: cmpxchg.nostore:
|
||||
; CHECK-NEXT: [[LOADED_NOSTORE:%.*]] = phi i32 [ [[LARX]], [[CMPXCHG_START]] ]
|
||||
; CHECK-NEXT: br label [[CMPXCHG_FAILURE:%.*]]
|
||||
; CHECK: cmpxchg.failure:
|
||||
; CHECK-NEXT: [[LOADED_FAILURE:%.*]] = phi i32 [ [[LOADED_NOSTORE]], [[CMPXCHG_NOSTORE]] ]
|
||||
; CHECK-NEXT: br label [[CMPXCHG_END]]
|
||||
; CHECK: cmpxchg.end:
|
||||
; CHECK-NEXT: [[LOADED_EXIT:%.*]] = phi i32 [ [[LOADED_TRYSTORE]], [[CMPXCHG_SUCCESS]] ], [ [[LOADED_FAILURE]], [[CMPXCHG_FAILURE]] ]
|
||||
; CHECK-NEXT: [[SUCCESS2:%.*]] = phi i1 [ true, [[CMPXCHG_SUCCESS]] ], [ false, [[CMPXCHG_FAILURE]] ]
|
||||
; CHECK-NEXT: [[TMP5]] = bitcast i32 [[LOADED_EXIT]] to float
|
||||
; CHECK-NEXT: br i1 [[SUCCESS2]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
|
||||
; CHECK: atomicrmw.end:
|
||||
; CHECK-NEXT: call void @llvm.ppc.lwsync()
|
||||
; CHECK-NEXT: ret float [[TMP5]]
|
||||
; CHECK-NEXT: }
|
||||
;
|
||||
|
||||
%res = atomicrmw fsub ptr %ptr, float %value seq_cst
|
||||
ret float %res
|
||||
|
||||
@@ -4,7 +4,8 @@
|
||||
define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = alloca float, align 4
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @__atomic_load_4(ptr [[PTR:%.*]], i32 0)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP8]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -30,7 +31,8 @@ define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
|
||||
define float @test_atomicrmw_fsub_f32(ptr %ptr, float %value) {
|
||||
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = alloca float, align 4
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @__atomic_load_4(ptr [[PTR:%.*]], i32 0)
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP8]] to float
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
|
||||
|
||||
@@ -134,23 +134,27 @@ define i128 @test_cmpxchg_i128(ptr %arg, i128 %old, i128 %new) {
|
||||
; CHECK-LABEL: @test_add_i128(
|
||||
; CHECK: %1 = alloca i128, align 8
|
||||
; CHECK: %2 = alloca i128, align 8
|
||||
; CHECK: %3 = load i128, ptr %arg, align 16
|
||||
; CHECK: %3 = alloca i128, align 8
|
||||
; CHECK: call void @llvm.lifetime.start.p0(ptr %3)
|
||||
; CHECK: call void @__atomic_load(i32 16, ptr %arg, ptr %3, i32 0)
|
||||
; CHECK: %4 = load i128, ptr %3, align 8
|
||||
; CHECK: call void @llvm.lifetime.end.p0(ptr %3)
|
||||
; CHECK: br label %atomicrmw.start
|
||||
; CHECK:atomicrmw.start:
|
||||
; CHECK: %loaded = phi i128 [ %3, %0 ], [ %newloaded, %atomicrmw.start ]
|
||||
; CHECK: %loaded = phi i128 [ %4, %0 ], [ %newloaded, %atomicrmw.start ]
|
||||
; CHECK: %new = add i128 %loaded, %val
|
||||
; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
|
||||
; CHECK: store i128 %loaded, ptr %1, align 8
|
||||
; CHECK: call void @llvm.lifetime.start.p0(ptr %2)
|
||||
; CHECK: store i128 %new, ptr %2, align 8
|
||||
; CHECK: %4 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 5)
|
||||
; CHECK: %5 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 5)
|
||||
; CHECK: call void @llvm.lifetime.end.p0(ptr %2)
|
||||
; CHECK: %5 = load i128, ptr %1, align 8
|
||||
; CHECK: %6 = load i128, ptr %1, align 8
|
||||
; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
|
||||
; CHECK: %6 = insertvalue { i128, i1 } poison, i128 %5, 0
|
||||
; CHECK: %7 = insertvalue { i128, i1 } %6, i1 %4, 1
|
||||
; CHECK: %success = extractvalue { i128, i1 } %7, 1
|
||||
; CHECK: %newloaded = extractvalue { i128, i1 } %7, 0
|
||||
; CHECK: %7 = insertvalue { i128, i1 } poison, i128 %6, 0
|
||||
; CHECK: %8 = insertvalue { i128, i1 } %7, i1 %5, 1
|
||||
; CHECK: %success = extractvalue { i128, i1 } %8, 1
|
||||
; CHECK: %newloaded = extractvalue { i128, i1 } %8, 0
|
||||
; CHECK: br i1 %success, label %atomicrmw.end, label %atomicrmw.start
|
||||
; CHECK:atomicrmw.end:
|
||||
; CHECK: ret i128 %newloaded
|
||||
|
||||
@@ -25,7 +25,7 @@ define i8 @test_cmpxchg_i8(ptr %arg, i8 %old, i8 %new) {
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = zext i8 [[OLD:%.*]] to i32
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = shl i32 [[TMP5]], [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, ptr [[ALIGNEDADDR]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], [[INV_MASK]]
|
||||
; CHECK-NEXT: br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
|
||||
; CHECK: partword.cmpxchg.loop:
|
||||
@@ -71,7 +71,7 @@ define i16 @test_cmpxchg_i16(ptr %arg, i16 %old, i16 %new) {
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = zext i16 [[OLD:%.*]] to i32
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = shl i32 [[TMP5]], [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, ptr [[ALIGNEDADDR]] monotonic, align 4
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], [[INV_MASK]]
|
||||
; CHECK-NEXT: br label [[PARTWORD_CMPXCHG_LOOP:%.*]]
|
||||
; CHECK: partword.cmpxchg.loop:
|
||||
@@ -115,7 +115,7 @@ define i16 @test_add_i16(ptr %arg, i16 %val) {
|
||||
; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VAL:%.*]] to i32
|
||||
; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i32, ptr [[ALIGNEDADDR]] monotonic, align 4
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -152,7 +152,7 @@ define i16 @test_xor_i16(ptr %arg, i16 %val) {
|
||||
; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VAL:%.*]] to i32
|
||||
; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i32, ptr [[ALIGNEDADDR]] monotonic, align 4
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -186,7 +186,7 @@ define i16 @test_or_i16(ptr %arg, i16 %val) {
|
||||
; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VAL:%.*]] to i32
|
||||
; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i32, ptr [[ALIGNEDADDR]] monotonic, align 4
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -221,7 +221,7 @@ define i16 @test_and_i16(ptr %arg, i16 %val) {
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VAL:%.*]] to i32
|
||||
; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i32, ptr [[ALIGNEDADDR]] monotonic, align 4
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -253,7 +253,7 @@ define i16 @test_min_i16(ptr %arg, i16 %val) {
|
||||
; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
|
||||
; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = load atomic i32, ptr [[ALIGNEDADDR]] monotonic, align 4
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP3]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
|
||||
@@ -291,7 +291,7 @@ define half @test_atomicrmw_fadd_f16(ptr %ptr, half %value) {
|
||||
; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP3]] to i32
|
||||
; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
|
||||
; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ALIGNEDADDR]], align 4
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load atomic i32, ptr [[ALIGNEDADDR]] monotonic, align 4
|
||||
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; CHECK: atomicrmw.start:
|
||||
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
|
||||
|
||||
Reference in New Issue
Block a user