From 0fe7c3a5453fc303db9c7a3ca17dab362ea832b4 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Wed, 20 Sep 2023 16:38:16 -0700 Subject: [PATCH 01/18] Add an "exact" flag to AtomicCmpXchgInst This will be used in the follow-up commits to determine whether to lower cmpxchg with an exact capability comparison or just the address. --- llvm/include/llvm/IR/Instructions.h | 19 ++++++-- llvm/lib/AsmParser/LLParser.cpp | 2 + llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 8 ++-- llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 1 + llvm/lib/IR/AsmWriter.cpp | 3 ++ llvm/lib/IR/Instruction.cpp | 2 + llvm/lib/IR/Instructions.cpp | 1 + llvm/lib/IR/Verifier.cpp | 3 ++ .../Transforms/Utils/FunctionComparator.cpp | 3 ++ llvm/lib/Transforms/Utils/LowerAtomic.cpp | 1 + llvm/test/Assembler/cmpxchg-exact-flag.ll | 46 +++++++++++++++++++ 11 files changed, 81 insertions(+), 8 deletions(-) create mode 100644 llvm/test/Assembler/cmpxchg-exact-flag.ll diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h index 08f5b9f40a5d..94a73fd92ed7 100644 --- a/llvm/include/llvm/IR/Instructions.h +++ b/llvm/include/llvm/IR/Instructions.h @@ -547,17 +547,18 @@ class AtomicCmpXchgInst : public Instruction { void operator delete(void *Ptr) { User::operator delete(Ptr); } using VolatileField = BoolBitfieldElementT<0>; - using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; + using ExactCompareField = BoolBitfieldElementT<VolatileField::NextBit>; + using WeakField = BoolBitfieldElementT<ExactCompareField::NextBit>; using SuccessOrderingField = AtomicOrderingBitfieldElementT<WeakField::NextBit>; using FailureOrderingField = AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; using AlignmentField = AlignmentBitfieldElementT<FailureOrderingField::NextBit>; - static_assert( - Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, FailureOrderingField, AlignmentField>(), - "Bitfields must be contiguous"); + static_assert(Bitfield::areContiguous<VolatileField, ExactCompareField, WeakField, SuccessOrderingField, FailureOrderingField, AlignmentField>(), + "Bitfields must be contiguous"); /// Return the alignment of the memory that is being allocated by the /// instruction. @@ -583,6 +584,14 @@ class AtomicCmpXchgInst : public Instruction { void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } + /// Return true if the cmpxchg must compare all bits of the value. + /// This is only relevant for CHERI, where the two possible semantics are + /// comparing only the address or all capability bits. + bool isExactCompare() const { return getSubclassData<ExactCompareField>(); } + void setExactCompare(bool Exact) { + setSubclassData<ExactCompareField>(Exact); + } + /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index b2f3e0f4fb25..80d6f1298375 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -7418,6 +7418,7 @@ int LLParser::parseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) { if (EatIfPresent(lltok::kw_volatile)) isVolatile = true; + bool isExact = EatIfPresent(lltok::kw_exact); if (parseTypeAndValue(Ptr, PtrLoc, PFS) || parseToken(lltok::comma, "expected ',' after cmpxchg address") || @@ -7455,6 +7456,7 @@ int LLParser::parseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) { SuccessOrdering, FailureOrdering, SSID); CXI->setVolatile(isVolatile); CXI->setWeak(isWeak); + CXI->setExactCompare(isExact); Inst = CXI; return AteExtraComma ?
InstExtraComma : InstNormal; diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 2a7b99e54af1..6c86d7892fe6 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -6005,7 +6005,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) { } case bitc::FUNC_CODE_INST_CMPXCHG: { // CMPXCHG: [ptrty, ptr, cmp, val, vol, success_ordering, synchscope, - // failure_ordering, weak, align?] + // failure_ordering, weak, align?, exact?] const size_t NumRecords = Record.size(); unsigned OpNum = 0; Value *Ptr = nullptr; @@ -6026,7 +6026,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) { CurBB)) return error("Invalid record"); - if (NumRecords < OpNum + 3 || NumRecords > OpNum + 6) + if (NumRecords < OpNum + 3 || NumRecords > OpNum + 7) return error("Invalid record"); const bool IsVol = Record[OpNum]; @@ -6050,10 +6050,11 @@ Error BitcodeReader::parseFunctionBody(Function *F) { MaybeAlign Alignment; - if (NumRecords == (OpNum + 6)) { + if (NumRecords >= (OpNum + 6)) { if (Error Err = parseAlignmentValue(Record[OpNum + 5], Alignment)) return Err; } + const bool IsExact = NumRecords >= (OpNum + 7) && Record[OpNum + 6]; if (!Alignment) Alignment = Align(TheModule->getDataLayout().getTypeStoreSize(Cmp->getType())); @@ -6062,6 +6063,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) { FailureOrdering, SSID); cast<AtomicCmpXchgInst>(I)->setVolatile(IsVol); cast<AtomicCmpXchgInst>(I)->setWeak(IsWeak); + cast<AtomicCmpXchgInst>(I)->setExactCompare(IsExact); unsigned I1TypeID = getVirtualTypeID(Type::getInt1Ty(Context)); ResTypeID = getVirtualTypeID(I->getType(), {CmpTypeID, I1TypeID}); diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index 383ab8b25641..ecdf96d7d3d1 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -3156,6 +3156,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I, getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getFailureOrdering())); Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak()); Vals.push_back(getEncodedAlign(cast<AtomicCmpXchgInst>(I).getAlign())); + Vals.push_back(cast<AtomicCmpXchgInst>(I).isExactCompare()); break; case Instruction::AtomicRMW: Code = bitc::FUNC_CODE_INST_ATOMICRMW; diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index 18ed2c63f24a..f0de2331c899 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -3984,6 +3984,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) { if (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isWeak()) Out << " weak"; + if (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isExactCompare()) + Out << " exact"; + // If this is a volatile operation, print out the volatile marker.
if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) || (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) || diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp index 84d7013424b0..f8cdd475aaaa 100644 --- a/llvm/lib/IR/Instruction.cpp +++ b/llvm/lib/IR/Instruction.cpp @@ -478,6 +478,8 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2, if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1)) return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() && CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() && + CXI->isExactCompare() == + cast<AtomicCmpXchgInst>(I2)->isExactCompare() && CXI->getSuccessOrdering() == cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() && CXI->getFailureOrdering() == diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index af21f41105a9..494813220e2b 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -4757,6 +4757,7 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const { getSuccessOrdering(), getFailureOrdering(), getSyncScopeID()); Result->setVolatile(isVolatile()); Result->setWeak(isWeak()); + Result->setExactCompare(isExactCompare()); return Result; } diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 34416ebd33b5..3dbe5a0ea0ae 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -3963,6 +3963,9 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { Check(ElTy->isIntOrPtrTy(), "cmpxchg operand must have integer or pointer type", ElTy, &CXI); checkAtomicMemAccessSize(ElTy, &CXI); + if (CXI.isExactCompare()) + Check(DL.isFatPointer(CXI.getNewValOperand()->getType()), + "exact flag on cmpxchg is only valid for capability types", &CXI); visitInstruction(CXI); } diff --git a/llvm/lib/Transforms/Utils/FunctionComparator.cpp b/llvm/lib/Transforms/Utils/FunctionComparator.cpp index 06596f7b04e1..52c376c8d4f7 100644 --- a/llvm/lib/Transforms/Utils/FunctionComparator.cpp +++ b/llvm/lib/Transforms/Utils/FunctionComparator.cpp @@ -645,6 +645,9 @@ int FunctionComparator::cmpOperations(const Instruction *L, if (int Res = cmpNumbers(CXI->isWeak(), cast<AtomicCmpXchgInst>(R)->isWeak())) return Res; + if (int Res = cmpNumbers(CXI->isExactCompare(), + cast<AtomicCmpXchgInst>(R)->isExactCompare())) + return Res; if (int Res = cmpOrderings(CXI->getSuccessOrdering(), cast<AtomicCmpXchgInst>(R)->getSuccessOrdering())) diff --git a/llvm/lib/Transforms/Utils/LowerAtomic.cpp b/llvm/lib/Transforms/Utils/LowerAtomic.cpp index 7ab986450258..beab439a1059 100644 --- a/llvm/lib/Transforms/Utils/LowerAtomic.cpp +++ b/llvm/lib/Transforms/Utils/LowerAtomic.cpp @@ -27,6 +27,7 @@ bool llvm::lowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) { Value *Val = CXI->getNewValOperand(); LoadInst *Orig = Builder.CreateLoad(Val->getType(), Ptr); + assert(!CXI->isExactCompare() && "Exact cmpxchg is not handled yet!"); Value *Equal = Builder.CreateICmpEQ(Orig, Cmp); Value *Res = Builder.CreateSelect(Equal, Val, Orig); Builder.CreateStore(Res, Ptr); diff --git a/llvm/test/Assembler/cmpxchg-exact-flag.ll b/llvm/test/Assembler/cmpxchg-exact-flag.ll new file mode 100644 index 000000000000..712c6366d3d7 --- /dev/null +++ b/llvm/test/Assembler/cmpxchg-exact-flag.ll @@ -0,0 +1,46 @@ +; RUN: split-file %s %t +; RUN: not llvm-as < %t/not-ptr.ll 2>&1 | FileCheck %s --check-prefix=ERR-NOT-PTR +; RUN: not llvm-as < %t/not-cap.ll 2>&1 | FileCheck %s --check-prefix=ERR-NOT-CAP +; RUN: not llvm-as < %t/wrong-order.ll 2>&1 | FileCheck %s --check-prefix=ERR-WRONG-ORDER +; RUN: llvm-as < %t/valid.ll | llvm-dis -o - | FileCheck %s --check-prefix=ROUNDTRIP + +;--- wrong-order.ll +define void @f(ptr addrspace(200) %a, ptr addrspace(200)
%b, ptr addrspace(200) %c, ptr %d) { + ; ERR-WRONG-ORDER: :4:31: error: expected type + ;; The "exact" flag must come after the optional "weak" flag. + %x = cmpxchg volatile exact weak ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst, align 16 + ret void +} + +;--- not-ptr.ll +define void @f(i32* %a, i32 %b, i32 %c) { + ; ERR-NOT-PTR: assembly parsed, but does not verify as correct! + ; ERR-NOT-PTR-NEXT: exact flag on cmpxchg is only valid for capability types + %x = cmpxchg exact i32* %a, i32 %b, i32 %c seq_cst seq_cst + ret void +} + +;--- not-cap.ll +target datalayout = "e-m:e-pf200:128:128:128:64" +define void @f(ptr addrspace(200) %a, ptr addrspace(1) %b, ptr addrspace(1) %c) { + ; ERR-NOT-CAP: assembly parsed, but does not verify as correct! + ; ERR-NOT-CAP-NEXT: exact flag on cmpxchg is only valid for capability types + %x = cmpxchg exact ptr addrspace(200) %a, ptr addrspace(1) %b, ptr addrspace(1) %c seq_cst seq_cst, align 16 + ret void +} + +;--- valid.ll +target datalayout = "e-m:e-pf200:128:128:128:64" +define void @f(ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c, ptr %d) { + ; ROUNDTRIP: %x = cmpxchg weak exact ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst, align 16 + %x = cmpxchg weak exact ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst + ; ROUNDTRIP: %y = cmpxchg exact ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst, align 64 + %y = cmpxchg exact ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst, align 64 + ; ROUNDTRIP: %z = cmpxchg ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst, align 16 + %z = cmpxchg ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst + ; ROUNDTRIP: %no_align = cmpxchg exact ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst, align 16 + %no_align = cmpxchg exact ptr addrspace(200) %a, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst + ; ROUNDTRIP: %noncap_ptr = cmpxchg exact ptr %d, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst, align 32 + %noncap_ptr = cmpxchg exact ptr %d, ptr addrspace(200) %b, ptr addrspace(200) %c seq_cst seq_cst, align 32 + ret void +} From f680e9308162b23481efb808be26627537ba33f1 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Wed, 20 Sep 2023 17:55:16 -0700 Subject: [PATCH 02/18] Add an ExactCompare flag to MachineMemOperand This will be used to support exact comparisons in cmpxchg lowering.
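To make the intent concrete, here is a sketch of how a caller threads the new flag through (modelled on the IRTranslator change in this patch; the variables are the ones in scope in translateAtomicCmpXchg, and binding the result to a local MMO is purely illustrative):

    // Forward AtomicCmpXchgInst::isExactCompare() into the machine memory
    // operand so that instruction selection can still see the IR-level flag.
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
        getMemOpAlign(I), I.getAAMetadata(), /*Ranges=*/nullptr,
        I.getSyncScopeID(), I.getSuccessOrdering(),
        /*ExactCompare=*/I.isExactCompare(), I.getFailureOrdering());

The new parameter defaults to false and sits before FailureOrdering, so only call sites that already pass an explicit failure ordering (i.e. the cmpxchg-related ones updated below) need to change.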
--- llvm/include/llvm/CodeGen/MachineFunction.h | 2 + llvm/include/llvm/CodeGen/MachineMemOperand.h | 16 ++++++- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 2 +- llvm/lib/CodeGen/MIRParser/MIParser.cpp | 5 +- llvm/lib/CodeGen/MachineFunction.cpp | 46 ++++++++++--------- llvm/lib/CodeGen/MachineOperand.cpp | 13 ++++-- .../SelectionDAG/SelectionDAGBuilder.cpp | 2 +- .../Target/Hexagon/HexagonFrameLowering.cpp | 2 +- .../Target/Hexagon/HexagonISelLowering.cpp | 3 +- llvm/tools/llvm-reduce/ReducerWorkItem.cpp | 2 +- 10 files changed, 58 insertions(+), 35 deletions(-) diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h index fc1188186ac4..6d384ed54d33 100644 --- a/llvm/include/llvm/CodeGen/MachineFunction.h +++ b/llvm/include/llvm/CodeGen/MachineFunction.h @@ -960,6 +960,7 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction { Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System, AtomicOrdering Ordering = AtomicOrdering::NotAtomic, + bool ExactCompare = false, AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic); MachineMemOperand *getMachineMemOperand( @@ -967,6 +968,7 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction { Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System, AtomicOrdering Ordering = AtomicOrdering::NotAtomic, + bool ExactCompare = false, AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic); /// getMachineMemOperand - Allocate a new MachineMemOperand by copying diff --git a/llvm/include/llvm/CodeGen/MachineMemOperand.h b/llvm/include/llvm/CodeGen/MachineMemOperand.h index 8efe4495a654..e3e5cd32aa9e 100644 --- a/llvm/include/llvm/CodeGen/MachineMemOperand.h +++ b/llvm/include/llvm/CodeGen/MachineMemOperand.h @@ -164,10 +164,13 @@ class MachineMemOperand { unsigned SSID : 8; // SyncScope::ID /// Atomic ordering requirements for this memory operation. For cmpxchg /// atomic operations, atomic ordering requirements when store occurs. - unsigned Ordering : 4; // enum AtomicOrdering + unsigned Ordering : 3; // enum AtomicOrdering /// For cmpxchg atomic operations, atomic ordering requirements when store /// does not occur. - unsigned FailureOrdering : 4; // enum AtomicOrdering + unsigned FailureOrdering : 3; // enum AtomicOrdering + /// Whether capability comparisons are exact for cmpxchg atomic operations. 
+ unsigned ExactCompare : 1; + static_assert((unsigned)AtomicOrdering::LAST <= 7, "Not enough bits"); }; MachinePointerInfo PtrInfo; @@ -193,12 +196,14 @@ const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System, AtomicOrdering Ordering = AtomicOrdering::NotAtomic, + bool ExactCompare = false, AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic); MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a, const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System, AtomicOrdering Ordering = AtomicOrdering::NotAtomic, + bool ExactCompare = false, AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic); const MachinePointerInfo &getPointerInfo() const { return PtrInfo; } @@ -280,6 +285,13 @@ class MachineMemOperand { return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering); } + /// For cmpxchg atomic operations, return whether the new value operand + /// needs to be compared exactly with the old value or if the address is + /// sufficient (only relevant for CHERI capabilities). + bool isExactCompare() const { + return static_cast<bool>(AtomicInfo.ExactCompare); + } + /// Return a single atomic ordering that is at least as strong as both the /// success and failure orderings for an atomic operation. (For operations /// other than cmpxchg, this is equivalent to getSuccessOrdering().) diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index cf018444ff25..68e625ad214a 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2873,7 +2873,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U, *MF->getMachineMemOperand( MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp), getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(), - I.getSuccessOrdering(), I.getFailureOrdering())); + I.getSuccessOrdering(), I.isExactCompare(), I.getFailureOrdering())); return true; } diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp index e639edf10d71..33f4b8890960 100644 --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -3242,6 +3242,8 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) { if (parseOptionalAtomicOrdering(FailureOrder)) return true; + const bool ExactCompare = consumeIfPresent(MIToken::kw_exact); + LLT MemoryType; if (Token.isNot(MIToken::IntegerLiteral) && Token.isNot(MIToken::kw_unknown_size) && @@ -3344,7 +3346,8 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) { if (expectAndConsume(MIToken::rparen)) return true; Dest = MF.getMachineMemOperand(Ptr, Flags, MemoryType, Align(BaseAlignment), - AAInfo, Range, SSID, Order, FailureOrder); + AAInfo, Range, SSID, Order, ExactCompare, + FailureOrder); return false; } diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp index 6b481a374382..cca583cd4dee 100644 --- a/llvm/lib/CodeGen/MachineFunction.cpp +++ b/llvm/lib/CodeGen/MachineFunction.cpp @@ -454,37 +454,37 @@ void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) { MachineMemOperand *MachineFunction::getMachineMemOperand( MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges, - SyncScope::ID SSID, AtomicOrdering Ordering, + SyncScope::ID SSID, AtomicOrdering Ordering, bool ExactCompare, AtomicOrdering
FailureOrdering) { return new (Allocator) - MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges, - SSID, Ordering, FailureOrdering); + MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges, SSID, + Ordering, ExactCompare, FailureOrdering); } MachineMemOperand *MachineFunction::getMachineMemOperand( MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges, - SyncScope::ID SSID, AtomicOrdering Ordering, + SyncScope::ID SSID, AtomicOrdering Ordering, bool ExactCompare, AtomicOrdering FailureOrdering) { return new (Allocator) MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID, - Ordering, FailureOrdering); + Ordering, ExactCompare, FailureOrdering); } MachineMemOperand *MachineFunction::getMachineMemOperand( const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, uint64_t Size) { - return new (Allocator) - MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(), - AAMDNodes(), nullptr, MMO->getSyncScopeID(), - MMO->getSuccessOrdering(), MMO->getFailureOrdering()); + return new (Allocator) MachineMemOperand( + PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(), AAMDNodes(), nullptr, + MMO->getSyncScopeID(), MMO->getSuccessOrdering(), MMO->isExactCompare(), + MMO->getFailureOrdering()); } MachineMemOperand *MachineFunction::getMachineMemOperand( const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) { - return new (Allocator) - MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(), - AAMDNodes(), nullptr, MMO->getSyncScopeID(), - MMO->getSuccessOrdering(), MMO->getFailureOrdering()); + return new (Allocator) MachineMemOperand( + PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(), AAMDNodes(), nullptr, + MMO->getSyncScopeID(), MMO->getSuccessOrdering(), MMO->isExactCompare(), + MMO->getFailureOrdering()); } MachineMemOperand * @@ -500,10 +500,11 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO, // Do not preserve ranges, since we don't necessarily know what the high bits // are anymore. 
- return new (Allocator) MachineMemOperand( - PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment, - MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(), - MMO->getSuccessOrdering(), MMO->getFailureOrdering()); + return new (Allocator) + MachineMemOperand(PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, + Alignment, MMO->getAAInfo(), nullptr, + MMO->getSyncScopeID(), MMO->getSuccessOrdering(), + MMO->isExactCompare(), MMO->getFailureOrdering()); } MachineMemOperand * @@ -516,16 +517,17 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO, return new (Allocator) MachineMemOperand( MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo, MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(), - MMO->getFailureOrdering()); + MMO->isExactCompare(), MMO->getFailureOrdering()); } MachineMemOperand * MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO, MachineMemOperand::Flags Flags) { - return new (Allocator) MachineMemOperand( - MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(), - MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(), - MMO->getSuccessOrdering(), MMO->getFailureOrdering()); + return new (Allocator) + MachineMemOperand(MMO->getPointerInfo(), Flags, MMO->getSize(), + MMO->getBaseAlign(), MMO->getAAInfo(), MMO->getRanges(), + MMO->getSyncScopeID(), MMO->getSuccessOrdering(), + MMO->isExactCompare(), MMO->getFailureOrdering()); } MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo( diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp index dfb3a4706990..b96ff8086ac0 100644 --- a/llvm/lib/CodeGen/MachineOperand.cpp +++ b/llvm/lib/CodeGen/MachineOperand.cpp @@ -1032,7 +1032,7 @@ MachinePointerInfo MachinePointerInfo::getUnknownStack(MachineFunction &MF) { MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f, LLT type, Align a, const AAMDNodes &AAInfo, const MDNode *Ranges, SyncScope::ID SSID, - AtomicOrdering Ordering, + AtomicOrdering Ordering, bool ExactCompare, AtomicOrdering FailureOrdering) : PtrInfo(ptrinfo), MemoryType(type), FlagVals(f), BaseAlign(a), AAInfo(AAInfo), Ranges(Ranges) { @@ -1047,17 +1047,18 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f, assert(getSuccessOrdering() == Ordering && "Value truncated"); AtomicInfo.FailureOrdering = static_cast(FailureOrdering); assert(getFailureOrdering() == FailureOrdering && "Value truncated"); + AtomicInfo.ExactCompare = ExactCompare; } MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f, uint64_t s, Align a, const AAMDNodes &AAInfo, const MDNode *Ranges, SyncScope::ID SSID, - AtomicOrdering Ordering, + AtomicOrdering Ordering, bool ExactCompare, AtomicOrdering FailureOrdering) - : MachineMemOperand(ptrinfo, f, - s == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * s), a, - AAInfo, Ranges, SSID, Ordering, FailureOrdering) {} + : MachineMemOperand( + ptrinfo, f, s == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * s), a, AAInfo, + Ranges, SSID, Ordering, ExactCompare, FailureOrdering) {} /// Profile - Gather unique data for the object. 
/// @@ -1129,6 +1130,8 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST, OS << toIRString(getSuccessOrdering()) << ' '; if (getFailureOrdering() != AtomicOrdering::NotAtomic) OS << toIRString(getFailureOrdering()) << ' '; + if (AtomicInfo.ExactCompare) + OS << "exact "; if (getMemoryType().isValid()) OS << '(' << getMemoryType() << ')'; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 1302b909a764..ab4553bd4c67 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -4628,7 +4628,7 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) { MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(), DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering, - FailureOrdering); + I.isExactCompare(), FailureOrdering); SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain, diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp index 01501109f3b1..4884f70ad42b 100644 --- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -1556,7 +1556,7 @@ void HexagonFrameLowering::processFunctionBeforeFrameFinalized( MMO->getPointerInfo(), MMO->getFlags(), MMO->getSize(), MFI.getObjectAlign(FI), MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(), - MMO->getFailureOrdering()); + MMO->isExactCompare(), MMO->getFailureOrdering()); new_memops.push_back(NewMMO); KeepOld = false; continue; diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index 2a11d3ab59ce..05e3b8b859b7 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -3079,7 +3079,8 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) WideMMO = MF.getMachineMemOperand( MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen), MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(), - MMO->getSuccessOrdering(), MMO->getFailureOrdering()); + MMO->getSuccessOrdering(), MMO->isExactCompare(), + MMO->getFailureOrdering()); } SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO); diff --git a/llvm/tools/llvm-reduce/ReducerWorkItem.cpp b/llvm/tools/llvm-reduce/ReducerWorkItem.cpp index b6e78dd97e76..35e1e908ad3c 100644 --- a/llvm/tools/llvm-reduce/ReducerWorkItem.cpp +++ b/llvm/tools/llvm-reduce/ReducerWorkItem.cpp @@ -183,7 +183,7 @@ static void cloneMemOperands(MachineInstr &DstMI, MachineInstr &SrcMI, NewPtrInfo, OldMMO->getFlags(), OldMMO->getMemoryType(), OldMMO->getBaseAlign(), OldMMO->getAAInfo(), OldMMO->getRanges(), OldMMO->getSyncScopeID(), OldMMO->getSuccessOrdering(), - OldMMO->getFailureOrdering()); + OldMMO->isExactCompare(), OldMMO->getFailureOrdering()); NewMMOs.push_back(NewMMO); } From 3bfa7ac40cb124d3a9582fe2a8827d80c386ad9d Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Wed, 20 Sep 2023 18:10:58 -0700 Subject: [PATCH 03/18] [CHERI-Generic] Update a test to use opaque pointers This also allows dropping one test that is now redundant since only the pointee type differed. 
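For example, a capability cmpxchg that previously spelled out the pointee type:

    %1 = cmpxchg i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire

now uses the pointee-agnostic opaque form:

    %1 = cmpxchg ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire

With opaque pointers the *_cap_i32 variants differed from the *_cap tests only in the (now erased) pointee type, so they no longer exercise a distinct case and are dropped.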
--- .../CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll | 54 ++-- .../CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll | 224 ++++++---------- .../CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll | 230 +++++------------ .../CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll | 242 +++++------------- 4 files changed, 215 insertions(+), 535 deletions(-) diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll index a2f1a7b939b7..9f9e544dc3dc 100644 --- a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll @@ -8,63 +8,53 @@ @IF-RISCV@; RUN: llc @HYBRID_HARDFLOAT_ARGS@ -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS --allow-unused-prefixes @IFNOT-RISCV@; RUN: llc @HYBRID_HARDFLOAT_ARGS@ < %s | FileCheck %s --check-prefix=HYBRID -define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %new) nounwind { - %1 = cmpxchg i8 addrspace(200)* %ptr, i8 %exp, i8 %new acq_rel acquire +define { i8, i1 } @test_cmpxchg_strong_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { + %1 = cmpxchg ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 } -define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, i16 %new) nounwind { - %1 = cmpxchg i16 addrspace(200)* %ptr, i16 %exp, i16 %new acq_rel acquire +define { i16, i1 } @test_cmpxchg_strong_i16(ptr addrspace(200) %ptr, i16 %exp, i16 %new) nounwind { + %1 = cmpxchg ptr addrspace(200) %ptr, i16 %exp, i16 %new acq_rel acquire ret { i16, i1 } %1 } -define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind { - %1 = cmpxchg i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire +define { i32, i1 } @test_cmpxchg_strong_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { + %1 = cmpxchg ptr addrspace(200) %ptr, i32 %exp, i32 %new acq_rel acquire ret { i32, i1 } %1 } -define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, i64 %new) nounwind { - %1 = cmpxchg i64 addrspace(200)* %ptr, i64 %exp, i64 %new acq_rel acquire +define { i64, i1 } @test_cmpxchg_strong_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { + %1 = cmpxchg ptr addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire ret { i64, i1 } %1 } -define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new) nounwind { - %1 = cmpxchg i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire - ret { i8 addrspace(200)*, i1 } %1 +define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { + %1 = cmpxchg ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 } -define { i32 addrspace(200)*, i1 } @test_cmpxchg_strong_cap_i32(i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new) nounwind { - %1 = cmpxchg weak i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new acq_rel acquire - ret { i32 addrspace(200)*, i1 } %1 -} - - -define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %new) nounwind { - %1 = cmpxchg weak i8 addrspace(200)* %ptr, i8 %exp, i8 %new acq_rel acquire +define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 
%new) nounwind { + %1 = cmpxchg weak ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 } -define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i16 %new) nounwind { - %1 = cmpxchg weak i16 addrspace(200)* %ptr, i16 %exp, i16 %new acq_rel acquire +define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 %new) nounwind { + %1 = cmpxchg weak ptr addrspace(200) %ptr, i16 %exp, i16 %new acq_rel acquire ret { i16, i1 } %1 } -define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind { - %1 = cmpxchg weak i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire +define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { + %1 = cmpxchg weak ptr addrspace(200) %ptr, i32 %exp, i32 %new acq_rel acquire ret { i32, i1 } %1 } -define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i64 %new) nounwind { - %1 = cmpxchg weak i64 addrspace(200)* %ptr, i64 %exp, i64 %new acq_rel acquire +define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { + %1 = cmpxchg weak ptr addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire ret { i64, i1 } %1 } -define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new) nounwind { - %1 = cmpxchg weak i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire - ret { i8 addrspace(200)*, i1 } %1 +define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { + %1 = cmpxchg weak ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 } -define { i32 addrspace(200)*, i1 } @test_cmpxchg_weak_cap_i32(i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new) nounwind { - %1 = cmpxchg weak i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new acq_rel acquire - ret { i32 addrspace(200)*, i1 } %1 -} diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll index 7214188e7be0..0613e364669d 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll @@ -6,7 +6,7 @@ ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s | FileCheck %s --check-prefix=PURECAP ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 < %s | FileCheck %s --check-prefix=HYBRID -define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %new) nounwind { +define { i8, i1 } @test_cmpxchg_strong_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_strong_i8: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sll $1, $5, 0 @@ -50,11 +50,11 @@ define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 % ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: nop - %1 = cmpxchg i8 addrspace(200)* %ptr, i8 %exp, i8 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 } -define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, i16 %new) nounwind { +define { i16, i1 } 
@test_cmpxchg_strong_i16(ptr addrspace(200) %ptr, i16 %exp, i16 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_strong_i16: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sll $1, $5, 0 @@ -98,11 +98,11 @@ define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: nop - %1 = cmpxchg i16 addrspace(200)* %ptr, i16 %exp, i16 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i16 %exp, i16 %new acq_rel acquire ret { i16, i1 } %1 } -define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind { +define { i32, i1 } @test_cmpxchg_strong_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_strong_i32: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sll $1, $5, 0 @@ -142,11 +142,11 @@ define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: nop - %1 = cmpxchg i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i32 %exp, i32 %new acq_rel acquire ret { i32, i1 } %1 } -define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, i64 %new) nounwind { +define { i64, i1 } @test_cmpxchg_strong_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_strong_i64: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sync @@ -182,11 +182,11 @@ define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: nop - %1 = cmpxchg i64 addrspace(200)* %ptr, i64 %exp, i64 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire ret { i64, i1 } %1 } -define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new) nounwind { +define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_strong_cap: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sync @@ -222,66 +222,25 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* ad ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cmove $c3, $c1 - %1 = cmpxchg i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire - ret { i8 addrspace(200)*, i1 } %1 + %1 = cmpxchg ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 } -define { i32 addrspace(200)*, i1 } @test_cmpxchg_strong_cap_i32(i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new) nounwind { -; PURECAP-LABEL: test_cmpxchg_strong_cap_i32: -; PURECAP: # %bb.0: -; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: cllc $c1, $c3 -; PURECAP-NEXT: ceq $1, $c1, $c4 -; PURECAP-NEXT: beqz $1, .LBB5_3 -; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 -; PURECAP-NEXT: cscc $1, $c5, $c3 -; PURECAP-NEXT: beqz $1, .LBB5_1 -; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB5_3: -; PURECAP-NEXT: ceq $2, $c1, $c4 -; PURECAP-NEXT: sync -; PURECAP-NEXT: cjr $c17 -; PURECAP-NEXT: cmove $c3, $c1 -; -; HYBRID-LABEL: test_cmpxchg_strong_cap_i32: -; HYBRID: # %bb.0: -; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: cllc $c1, $c3 -; HYBRID-NEXT: 
ceq $1, $c1, $c4 -; HYBRID-NEXT: beqz $1, .LBB5_3 -; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 -; HYBRID-NEXT: cscc $1, $c5, $c3 -; HYBRID-NEXT: beqz $1, .LBB5_1 -; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB5_3: -; HYBRID-NEXT: ceq $2, $c1, $c4 -; HYBRID-NEXT: sync -; HYBRID-NEXT: jr $ra -; HYBRID-NEXT: cmove $c3, $c1 - %1 = cmpxchg weak i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new acq_rel acquire - ret { i32 addrspace(200)*, i1 } %1 -} - - -define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %new) nounwind { +define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_weak_i8: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sll $1, $5, 0 ; PURECAP-NEXT: sll $3, $4, 0 ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllb $2, $c3 -; PURECAP-NEXT: bne $2, $3, .LBB6_3 +; PURECAP-NEXT: bne $2, $3, .LBB5_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 ; PURECAP-NEXT: cscb $4, $1, $c3 -; PURECAP-NEXT: beqz $4, .LBB6_1 +; PURECAP-NEXT: beqz $4, .LBB5_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB6_3: +; PURECAP-NEXT: .LBB5_3: ; PURECAP-NEXT: sll $1, $3, 24 ; PURECAP-NEXT: sra $1, $1, 24 ; PURECAP-NEXT: xor $1, $2, $1 @@ -295,15 +254,15 @@ define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %ne ; HYBRID-NEXT: sll $1, $5, 0 ; HYBRID-NEXT: sll $3, $4, 0 ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllb $2, $c3 -; HYBRID-NEXT: bne $2, $3, .LBB6_3 +; HYBRID-NEXT: bne $2, $3, .LBB5_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 ; HYBRID-NEXT: cscb $4, $1, $c3 -; HYBRID-NEXT: beqz $4, .LBB6_1 +; HYBRID-NEXT: beqz $4, .LBB5_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB6_3: +; HYBRID-NEXT: .LBB5_3: ; HYBRID-NEXT: sll $1, $3, 24 ; HYBRID-NEXT: sra $1, $1, 24 ; HYBRID-NEXT: xor $1, $2, $1 @@ -311,25 +270,25 @@ define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %ne ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: nop - %1 = cmpxchg weak i8 addrspace(200)* %ptr, i8 %exp, i8 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 } -define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i16 %new) nounwind { +define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_weak_i16: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sll $1, $5, 0 ; PURECAP-NEXT: sll $3, $4, 0 ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllh $2, $c3 -; PURECAP-NEXT: bne $2, $3, .LBB7_3 +; PURECAP-NEXT: bne $2, $3, .LBB6_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 ; PURECAP-NEXT: csch $4, $1, $c3 -; PURECAP-NEXT: beqz $4, .LBB7_1 +; PURECAP-NEXT: beqz $4, .LBB6_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB7_3: +; PURECAP-NEXT: .LBB6_3: ; PURECAP-NEXT: sll $1, $3, 16 ; PURECAP-NEXT: sra $1, $1, 16 ; 
PURECAP-NEXT: xor $1, $2, $1 @@ -343,15 +302,15 @@ define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i1 ; HYBRID-NEXT: sll $1, $5, 0 ; HYBRID-NEXT: sll $3, $4, 0 ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllh $2, $c3 -; HYBRID-NEXT: bne $2, $3, .LBB7_3 +; HYBRID-NEXT: bne $2, $3, .LBB6_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 ; HYBRID-NEXT: csch $4, $1, $c3 -; HYBRID-NEXT: beqz $4, .LBB7_1 +; HYBRID-NEXT: beqz $4, .LBB6_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB7_3: +; HYBRID-NEXT: .LBB6_3: ; HYBRID-NEXT: sll $1, $3, 16 ; HYBRID-NEXT: sra $1, $1, 16 ; HYBRID-NEXT: xor $1, $2, $1 @@ -359,25 +318,25 @@ define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i1 ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: nop - %1 = cmpxchg weak i16 addrspace(200)* %ptr, i16 %exp, i16 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i16 %exp, i16 %new acq_rel acquire ret { i16, i1 } %1 } -define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind { +define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_weak_i32: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sll $1, $5, 0 ; PURECAP-NEXT: sll $3, $4, 0 ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllw $2, $c3 -; PURECAP-NEXT: bne $2, $3, .LBB8_3 +; PURECAP-NEXT: bne $2, $3, .LBB7_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 ; PURECAP-NEXT: cscw $4, $1, $c3 -; PURECAP-NEXT: beqz $4, .LBB8_1 +; PURECAP-NEXT: beqz $4, .LBB7_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB8_3: +; PURECAP-NEXT: .LBB7_3: ; PURECAP-NEXT: xor $1, $2, $3 ; PURECAP-NEXT: sltiu $3, $1, 1 ; PURECAP-NEXT: sync @@ -389,37 +348,37 @@ define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i3 ; HYBRID-NEXT: sll $1, $5, 0 ; HYBRID-NEXT: sll $3, $4, 0 ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllw $2, $c3 -; HYBRID-NEXT: bne $2, $3, .LBB8_3 +; HYBRID-NEXT: bne $2, $3, .LBB7_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 ; HYBRID-NEXT: cscw $4, $1, $c3 -; HYBRID-NEXT: beqz $4, .LBB8_1 +; HYBRID-NEXT: beqz $4, .LBB7_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB8_3: +; HYBRID-NEXT: .LBB7_3: ; HYBRID-NEXT: xor $1, $2, $3 ; HYBRID-NEXT: sltiu $3, $1, 1 ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: nop - %1 = cmpxchg weak i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i32 %exp, i32 %new acq_rel acquire ret { i32, i1 } %1 } -define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i64 %new) nounwind { +define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_weak_i64: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: 
clld $2, $c3 -; PURECAP-NEXT: bne $2, $4, .LBB9_3 +; PURECAP-NEXT: bne $2, $4, .LBB8_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 ; PURECAP-NEXT: cscd $1, $5, $c3 -; PURECAP-NEXT: beqz $1, .LBB9_1 +; PURECAP-NEXT: beqz $1, .LBB8_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB9_3: +; PURECAP-NEXT: .LBB8_3: ; PURECAP-NEXT: xor $1, $2, $4 ; PURECAP-NEXT: sltiu $3, $1, 1 ; PURECAP-NEXT: sync @@ -429,38 +388,38 @@ define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i6 ; HYBRID-LABEL: test_cmpxchg_weak_i64: ; HYBRID: # %bb.0: ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: clld $2, $c3 -; HYBRID-NEXT: bne $2, $4, .LBB9_3 +; HYBRID-NEXT: bne $2, $4, .LBB8_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 ; HYBRID-NEXT: cscd $1, $5, $c3 -; HYBRID-NEXT: beqz $1, .LBB9_1 +; HYBRID-NEXT: beqz $1, .LBB8_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB9_3: +; HYBRID-NEXT: .LBB8_3: ; HYBRID-NEXT: xor $1, $2, $4 ; HYBRID-NEXT: sltiu $3, $1, 1 ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: nop - %1 = cmpxchg weak i64 addrspace(200)* %ptr, i64 %exp, i64 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire ret { i64, i1 } %1 } -define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new) nounwind { +define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_weak_cap: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllc $c1, $c3 ; PURECAP-NEXT: ceq $1, $c1, $c4 -; PURECAP-NEXT: beqz $1, .LBB10_3 +; PURECAP-NEXT: beqz $1, .LBB9_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 ; PURECAP-NEXT: cscc $1, $c5, $c3 -; PURECAP-NEXT: beqz $1, .LBB10_1 +; PURECAP-NEXT: beqz $1, .LBB9_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB10_3: +; PURECAP-NEXT: .LBB9_3: ; PURECAP-NEXT: ceq $2, $c1, $c4 ; PURECAP-NEXT: sync ; PURECAP-NEXT: cjr $c17 @@ -469,60 +428,21 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addr ; HYBRID-LABEL: test_cmpxchg_weak_cap: ; HYBRID: # %bb.0: ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllc $c1, $c3 ; HYBRID-NEXT: ceq $1, $c1, $c4 -; HYBRID-NEXT: beqz $1, .LBB10_3 +; HYBRID-NEXT: beqz $1, .LBB9_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 ; HYBRID-NEXT: cscc $1, $c5, $c3 -; HYBRID-NEXT: beqz $1, .LBB10_1 +; HYBRID-NEXT: beqz $1, .LBB9_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB10_3: +; HYBRID-NEXT: .LBB9_3: ; HYBRID-NEXT: ceq $2, $c1, $c4 ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cmove $c3, $c1 - %1 = cmpxchg weak i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire - ret { i8 addrspace(200)*, i1 } %1 + %1 = cmpxchg weak ptr 
addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 } -define { i32 addrspace(200)*, i1 } @test_cmpxchg_weak_cap_i32(i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new) nounwind { -; PURECAP-LABEL: test_cmpxchg_weak_cap_i32: -; PURECAP: # %bb.0: -; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: cllc $c1, $c3 -; PURECAP-NEXT: ceq $1, $c1, $c4 -; PURECAP-NEXT: beqz $1, .LBB11_3 -; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 -; PURECAP-NEXT: cscc $1, $c5, $c3 -; PURECAP-NEXT: beqz $1, .LBB11_1 -; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB11_3: -; PURECAP-NEXT: ceq $2, $c1, $c4 -; PURECAP-NEXT: sync -; PURECAP-NEXT: cjr $c17 -; PURECAP-NEXT: cmove $c3, $c1 -; -; HYBRID-LABEL: test_cmpxchg_weak_cap_i32: -; HYBRID: # %bb.0: -; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: cllc $c1, $c3 -; HYBRID-NEXT: ceq $1, $c1, $c4 -; HYBRID-NEXT: beqz $1, .LBB11_3 -; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 -; HYBRID-NEXT: cscc $1, $c5, $c3 -; HYBRID-NEXT: beqz $1, .LBB11_1 -; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB11_3: -; HYBRID-NEXT: ceq $2, $c1, $c4 -; HYBRID-NEXT: sync -; HYBRID-NEXT: jr $ra -; HYBRID-NEXT: cmove $c3, $c1 - %1 = cmpxchg weak i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new acq_rel acquire - ret { i32 addrspace(200)*, i1 } %1 -} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll index 234b3a165517..cd17f5e4f43f 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll @@ -8,7 +8,7 @@ ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-ATOMICS --allow-unused-prefixes ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS --allow-unused-prefixes -define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %new) nounwind { +define { i8, i1 } @test_cmpxchg_strong_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_i8: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 24 @@ -59,11 +59,11 @@ define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 % ; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i8 addrspace(200)* %ptr, i8 %exp, i8 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 } -define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, i16 %new) nounwind { +define { i16, i1 } @test_cmpxchg_strong_i16(ptr addrspace(200) %ptr, i16 %exp, i16 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_i16: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 16 @@ -84,12 +84,11 @@ define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 ; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 6 -; 
PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 2 ; PURECAP-LIBCALLS-NEXT: csh a1, 6(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 6 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 2 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_2 ; PURECAP-LIBCALLS-NEXT: clh a1, 6(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -115,11 +114,11 @@ define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, ; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i16 addrspace(200)* %ptr, i16 %exp, i16 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i16 %exp, i16 %new acq_rel acquire ret { i16, i1 } %1 } -define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind { +define { i32, i1 } @test_cmpxchg_strong_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_i32: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 @@ -138,12 +137,11 @@ define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 ; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 4 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 4 ; PURECAP-LIBCALLS-NEXT: csw a1, 4(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 4 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 4 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_4 ; PURECAP-LIBCALLS-NEXT: clw a1, 4(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -169,11 +167,11 @@ define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, ; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i32 %exp, i32 %new acq_rel acquire ret { i32, i1 } %1 } -define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, i64 %new) nounwind { +define { i64, i1 } @test_cmpxchg_strong_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_strong_i64: ; PURECAP: # %bb.0: ; PURECAP-NEXT: cincoffset csp, csp, -32 @@ -183,10 +181,10 @@ define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, ; PURECAP-NEXT: mv a7, a4 ; PURECAP-NEXT: cmove ct0, ca1 ; PURECAP-NEXT: cmove cs0, ca0 -; PURECAP-NEXT: cincoffset ca0, csp, 8 -; PURECAP-NEXT: csetbounds ca1, ca0, 8 ; PURECAP-NEXT: csw a3, 12(csp) ; PURECAP-NEXT: csw a2, 8(csp) +; PURECAP-NEXT: cincoffset ca0, csp, 8 +; PURECAP-NEXT: csetbounds ca1, ca0, 8 ; PURECAP-NEXT: li a4, 4 ; PURECAP-NEXT: li a5, 2 ; PURECAP-NEXT: cmove ca0, ct0 @@ -230,11 +228,11 @@ define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, ; HYBRID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i64 addrspace(200)* %ptr, i64 %exp, i64 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire ret { i64, i1 } %1 } -define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, 
i8 addrspace(200)* %new) nounwind { +define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_cap: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 @@ -253,12 +251,11 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* ad ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 ; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 0 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 8 ; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 8 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap ; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -284,77 +281,22 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* ad ; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire - ret { i8 addrspace(200)*, i1 } %1 + %1 = cmpxchg ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 } -define { i32 addrspace(200)*, i1 } @test_cmpxchg_strong_cap_i32(i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new) nounwind { -; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_cap_i32: -; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 -; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 -; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 -; PURECAP-ATOMICS-NEXT: .LBB5_3: -; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 -; PURECAP-ATOMICS-NEXT: seqz a1, a0 -; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 -; PURECAP-ATOMICS-NEXT: cret -; -; PURECAP-LIBCALLS-LABEL: test_cmpxchg_strong_cap_i32: -; PURECAP-LIBCALLS: # %bb.0: -; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 -; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 0 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 8 -; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) -; PURECAP-LIBCALLS-NEXT: li a3, 4 -; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 -; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap -; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) -; PURECAP-LIBCALLS-NEXT: mv a2, a0 -; PURECAP-LIBCALLS-NEXT: cmove ca0, ca1 -; PURECAP-LIBCALLS-NEXT: mv a1, a2 -; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 -; PURECAP-LIBCALLS-NEXT: cret -; -; HYBRID-LABEL: test_cmpxchg_strong_cap_i32: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sc ca1, 0(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a3, 4 -; HYBRID-NEXT: li a4, 2 -; HYBRID-NEXT: call __atomic_compare_exchange_cap_c@plt -; HYBRID-NEXT: lc ca1, 0(sp) -; HYBRID-NEXT: mv a2, a0 -; HYBRID-NEXT: cmove ca0, ca1 -; HYBRID-NEXT: 
mv a1, a2 -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret - %1 = cmpxchg weak i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new acq_rel acquire - ret { i32 addrspace(200)*, i1 } %1 -} - - -define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %new) nounwind { +define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i8: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 24 ; PURECAP-ATOMICS-NEXT: srai a1, a1, 24 -; PURECAP-ATOMICS-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.b.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB6_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.b.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB6_1 -; PURECAP-ATOMICS-NEXT: .LBB6_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 +; PURECAP-ATOMICS-NEXT: .LBB5_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -394,22 +336,22 @@ define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %ne ; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i8 addrspace(200)* %ptr, i8 %exp, i8 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 } -define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i16 %new) nounwind { +define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i16: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 16 ; PURECAP-ATOMICS-NEXT: srai a1, a1, 16 -; PURECAP-ATOMICS-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.h.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB7_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB6_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.h.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB7_1 -; PURECAP-ATOMICS-NEXT: .LBB7_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB6_1 +; PURECAP-ATOMICS-NEXT: .LBB6_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -419,12 +361,11 @@ define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i1 ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 ; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 6 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 2 ; PURECAP-LIBCALLS-NEXT: csh a1, 6(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 6 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 2 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_2 ; PURECAP-LIBCALLS-NEXT: clh a1, 6(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -450,20 +391,20 @@ define { i16, 
i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i1 ; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i16 addrspace(200)* %ptr, i16 %exp, i16 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i16 %exp, i16 %new acq_rel acquire ret { i16, i1 } %1 } -define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind { +define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i32: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.w.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB8_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB7_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.w.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB8_1 -; PURECAP-ATOMICS-NEXT: .LBB8_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB7_1 +; PURECAP-ATOMICS-NEXT: .LBB7_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -473,12 +414,11 @@ define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i3 ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 ; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 4 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 4 ; PURECAP-LIBCALLS-NEXT: csw a1, 4(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 4 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 4 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_4 ; PURECAP-LIBCALLS-NEXT: clw a1, 4(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -504,11 +444,11 @@ define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i3 ; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i32 %exp, i32 %new acq_rel acquire ret { i32, i1 } %1 } -define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i64 %new) nounwind { +define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_weak_i64: ; PURECAP: # %bb.0: ; PURECAP-NEXT: cincoffset csp, csp, -32 @@ -518,10 +458,10 @@ define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i6 ; PURECAP-NEXT: mv a7, a4 ; PURECAP-NEXT: cmove ct0, ca1 ; PURECAP-NEXT: cmove cs0, ca0 -; PURECAP-NEXT: cincoffset ca0, csp, 8 -; PURECAP-NEXT: csetbounds ca1, ca0, 8 ; PURECAP-NEXT: csw a3, 12(csp) ; PURECAP-NEXT: csw a2, 8(csp) +; PURECAP-NEXT: cincoffset ca0, csp, 8 +; PURECAP-NEXT: csetbounds ca1, ca0, 8 ; PURECAP-NEXT: li a4, 4 ; PURECAP-NEXT: li a5, 2 ; PURECAP-NEXT: cmove ca0, ct0 @@ -565,20 +505,20 @@ define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i6 ; HYBRID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i64 addrspace(200)* %ptr, i64 %exp, i64 %new acq_rel acquire + %1 = cmpxchg weak ptr 
addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire ret { i64, i1 } %1 } -define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new) nounwind { +define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB10_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB9_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB10_1 -; PURECAP-ATOMICS-NEXT: .LBB10_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB9_1 +; PURECAP-ATOMICS-NEXT: .LBB9_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 @@ -588,12 +528,11 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addr ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 ; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 0 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 8 ; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 8 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap ; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -619,60 +558,7 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addr ; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire - ret { i8 addrspace(200)*, i1 } %1 + %1 = cmpxchg weak ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 } -define { i32 addrspace(200)*, i1 } @test_cmpxchg_weak_cap_i32(i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new) nounwind { -; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap_i32: -; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 -; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB11_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 -; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB11_1 -; PURECAP-ATOMICS-NEXT: .LBB11_3: -; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 -; PURECAP-ATOMICS-NEXT: seqz a1, a0 -; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 -; PURECAP-ATOMICS-NEXT: cret -; -; PURECAP-LIBCALLS-LABEL: test_cmpxchg_weak_cap_i32: -; PURECAP-LIBCALLS: # %bb.0: -; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 -; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 0 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 8 -; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) -; PURECAP-LIBCALLS-NEXT: li a3, 
4 -; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 -; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap -; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) -; PURECAP-LIBCALLS-NEXT: mv a2, a0 -; PURECAP-LIBCALLS-NEXT: cmove ca0, ca1 -; PURECAP-LIBCALLS-NEXT: mv a1, a2 -; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 -; PURECAP-LIBCALLS-NEXT: cret -; -; HYBRID-LABEL: test_cmpxchg_weak_cap_i32: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sc ca1, 0(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a3, 4 -; HYBRID-NEXT: li a4, 2 -; HYBRID-NEXT: call __atomic_compare_exchange_cap_c@plt -; HYBRID-NEXT: lc ca1, 0(sp) -; HYBRID-NEXT: mv a2, a0 -; HYBRID-NEXT: cmove ca0, ca1 -; HYBRID-NEXT: mv a1, a2 -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret - %1 = cmpxchg weak i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new acq_rel acquire - ret { i32 addrspace(200)*, i1 } %1 -} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll index bd47db6d24a2..5639d60836d9 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll @@ -8,7 +8,7 @@ ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-ATOMICS --allow-unused-prefixes ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS --allow-unused-prefixes -define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %new) nounwind { +define { i8, i1 } @test_cmpxchg_strong_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_i8: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 56 @@ -59,11 +59,11 @@ define { i8, i1 } @test_cmpxchg_strong_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 % ; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i8 addrspace(200)* %ptr, i8 %exp, i8 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 } -define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, i16 %new) nounwind { +define { i16, i1 } @test_cmpxchg_strong_i16(ptr addrspace(200) %ptr, i16 %exp, i16 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_i16: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 48 @@ -84,12 +84,11 @@ define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 ; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 14 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 2 ; PURECAP-LIBCALLS-NEXT: csh a1, 14(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 14 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 2 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_2 ; PURECAP-LIBCALLS-NEXT: clh a1, 14(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, 
a0 @@ -115,11 +114,11 @@ define { i16, i1 } @test_cmpxchg_strong_i16(i16 addrspace(200)* %ptr, i16 %exp, ; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i16 addrspace(200)* %ptr, i16 %exp, i16 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i16 %exp, i16 %new acq_rel acquire ret { i16, i1 } %1 } -define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind { +define { i32, i1 } @test_cmpxchg_strong_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_i32: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: sext.w a1, a1 @@ -139,12 +138,11 @@ define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 ; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 12 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 4 ; PURECAP-LIBCALLS-NEXT: csw a1, 12(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 12 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 4 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_4 ; PURECAP-LIBCALLS-NEXT: clw a1, 12(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -170,11 +168,11 @@ define { i32, i1 } @test_cmpxchg_strong_i32(i32 addrspace(200)* %ptr, i32 %exp, ; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i32 %exp, i32 %new acq_rel acquire ret { i32, i1 } %1 } -define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, i64 %new) nounwind { +define { i64, i1 } @test_cmpxchg_strong_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_i64: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 @@ -193,12 +191,11 @@ define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 ; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 8 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 8 ; PURECAP-LIBCALLS-NEXT: csd a1, 8(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 8 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 8 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 ; PURECAP-LIBCALLS-NEXT: cld a1, 8(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -224,11 +221,11 @@ define { i64, i1 } @test_cmpxchg_strong_i64(i64 addrspace(200)* %ptr, i64 %exp, ; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg i64 addrspace(200)* %ptr, i64 %exp, i64 %new acq_rel acquire + %1 = cmpxchg ptr addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire ret { i64, i1 } %1 } -define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new) nounwind { +define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) 
nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_cap: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 @@ -247,12 +244,11 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* ad ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 ; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 0 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 16 ; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 16 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap ; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -278,77 +274,22 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_strong_cap(i8 addrspace(200)* ad ; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 32 ; HYBRID-NEXT: ret - %1 = cmpxchg i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire - ret { i8 addrspace(200)*, i1 } %1 + %1 = cmpxchg ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 } -define { i32 addrspace(200)*, i1 } @test_cmpxchg_strong_cap_i32(i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new) nounwind { -; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_cap_i32: -; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 -; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 -; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 -; PURECAP-ATOMICS-NEXT: .LBB5_3: -; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 -; PURECAP-ATOMICS-NEXT: seqz a1, a0 -; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 -; PURECAP-ATOMICS-NEXT: cret -; -; PURECAP-LIBCALLS-LABEL: test_cmpxchg_strong_cap_i32: -; PURECAP-LIBCALLS: # %bb.0: -; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 -; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 0 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 16 -; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) -; PURECAP-LIBCALLS-NEXT: li a3, 4 -; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 -; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap -; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) -; PURECAP-LIBCALLS-NEXT: mv a2, a0 -; PURECAP-LIBCALLS-NEXT: cmove ca0, ca1 -; PURECAP-LIBCALLS-NEXT: mv a1, a2 -; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload -; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 -; PURECAP-LIBCALLS-NEXT: cret -; -; HYBRID-LABEL: test_cmpxchg_strong_cap_i32: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sc ca1, 0(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a3, 4 -; HYBRID-NEXT: li a4, 2 -; HYBRID-NEXT: call __atomic_compare_exchange_cap_c@plt -; HYBRID-NEXT: lc ca1, 0(sp) -; HYBRID-NEXT: mv a2, a0 -; HYBRID-NEXT: cmove ca0, ca1 -; HYBRID-NEXT: mv a1, a2 -; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret - %1 = cmpxchg weak i32 addrspace(200)* 
addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new acq_rel acquire - ret { i32 addrspace(200)*, i1 } %1 -} - - -define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %new) nounwind { +define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i8: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 56 ; PURECAP-ATOMICS-NEXT: srai a1, a1, 56 -; PURECAP-ATOMICS-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.b.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB6_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.b.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB6_1 -; PURECAP-ATOMICS-NEXT: .LBB6_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 +; PURECAP-ATOMICS-NEXT: .LBB5_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -388,22 +329,22 @@ define { i8, i1 } @test_cmpxchg_weak_i8(i8 addrspace(200)* %ptr, i8 %exp, i8 %ne ; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i8 addrspace(200)* %ptr, i8 %exp, i8 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 } -define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i16 %new) nounwind { +define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i16: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 48 ; PURECAP-ATOMICS-NEXT: srai a1, a1, 48 -; PURECAP-ATOMICS-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.h.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB7_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB6_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.h.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB7_1 -; PURECAP-ATOMICS-NEXT: .LBB7_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB6_1 +; PURECAP-ATOMICS-NEXT: .LBB6_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -413,12 +354,11 @@ define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i1 ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 ; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 14 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 2 ; PURECAP-LIBCALLS-NEXT: csh a1, 14(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 14 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 2 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_2 ; PURECAP-LIBCALLS-NEXT: clh a1, 14(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -444,21 +384,21 @@ define { i16, i1 } @test_cmpxchg_weak_i16(i16 addrspace(200)* %ptr, i16 %exp, i1 ; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 
; HYBRID-NEXT: ret - %1 = cmpxchg weak i16 addrspace(200)* %ptr, i16 %exp, i16 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i16 %exp, i16 %new acq_rel acquire ret { i16, i1 } %1 } -define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i32 %new) nounwind { +define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i32: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: sext.w a1, a1 -; PURECAP-ATOMICS-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.w.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB8_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB7_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.w.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB8_1 -; PURECAP-ATOMICS-NEXT: .LBB8_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB7_1 +; PURECAP-ATOMICS-NEXT: .LBB7_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -468,12 +408,11 @@ define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i3 ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 ; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 12 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 4 ; PURECAP-LIBCALLS-NEXT: csw a1, 12(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 12 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 4 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_4 ; PURECAP-LIBCALLS-NEXT: clw a1, 12(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -499,20 +438,20 @@ define { i32, i1 } @test_cmpxchg_weak_i32(i32 addrspace(200)* %ptr, i32 %exp, i3 ; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i32 addrspace(200)* %ptr, i32 %exp, i32 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i32 %exp, i32 %new acq_rel acquire ret { i32, i1 } %1 } -define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i64 %new) nounwind { +define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i64: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.d.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB9_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB8_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.d.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB9_1 -; PURECAP-ATOMICS-NEXT: .LBB9_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB8_1 +; PURECAP-ATOMICS-NEXT: .LBB8_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -522,12 +461,11 @@ define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i6 ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 ; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) 
# 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 8 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 8 ; PURECAP-LIBCALLS-NEXT: csd a1, 8(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 8 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 8 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 ; PURECAP-LIBCALLS-NEXT: cld a1, 8(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -553,20 +491,20 @@ define { i64, i1 } @test_cmpxchg_weak_i64(i64 addrspace(200)* %ptr, i64 %exp, i6 ; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 16 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i64 addrspace(200)* %ptr, i64 %exp, i64 %new acq_rel acquire + %1 = cmpxchg weak ptr addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire ret { i64, i1 } %1 } -define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new) nounwind { +define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB10_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB9_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB10_1 -; PURECAP-ATOMICS-NEXT: .LBB10_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB9_1 +; PURECAP-ATOMICS-NEXT: .LBB9_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 @@ -576,12 +514,11 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addr ; PURECAP-LIBCALLS: # %bb.0: ; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 ; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 0 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 16 ; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 16 ; PURECAP-LIBCALLS-NEXT: li a3, 4 ; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 ; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap ; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) ; PURECAP-LIBCALLS-NEXT: mv a2, a0 @@ -607,60 +544,7 @@ define { i8 addrspace(200)*, i1 } @test_cmpxchg_weak_cap(i8 addrspace(200)* addr ; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; HYBRID-NEXT: addi sp, sp, 32 ; HYBRID-NEXT: ret - %1 = cmpxchg weak i8 addrspace(200)* addrspace(200)* %ptr, i8 addrspace(200)* %exp, i8 addrspace(200)* %new acq_rel acquire - ret { i8 addrspace(200)*, i1 } %1 + %1 = cmpxchg weak ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 } -define { i32 addrspace(200)*, i1 } @test_cmpxchg_weak_cap_i32(i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new) nounwind { -; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap_i32: -; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB11_1: # =>This Inner Loop Header: 
Depth=1 -; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB11_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 -; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB11_1 -; PURECAP-ATOMICS-NEXT: .LBB11_3: -; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 -; PURECAP-ATOMICS-NEXT: seqz a1, a0 -; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 -; PURECAP-ATOMICS-NEXT: cret -; -; PURECAP-LIBCALLS-LABEL: test_cmpxchg_weak_cap_i32: -; PURECAP-LIBCALLS: # %bb.0: -; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 -; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill -; PURECAP-LIBCALLS-NEXT: cincoffset ca3, csp, 0 -; PURECAP-LIBCALLS-NEXT: csetbounds ca5, ca3, 16 -; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) -; PURECAP-LIBCALLS-NEXT: li a3, 4 -; PURECAP-LIBCALLS-NEXT: li a4, 2 -; PURECAP-LIBCALLS-NEXT: cmove ca1, ca5 -; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap -; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) -; PURECAP-LIBCALLS-NEXT: mv a2, a0 -; PURECAP-LIBCALLS-NEXT: cmove ca0, ca1 -; PURECAP-LIBCALLS-NEXT: mv a1, a2 -; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload -; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 -; PURECAP-LIBCALLS-NEXT: cret -; -; HYBRID-LABEL: test_cmpxchg_weak_cap_i32: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sc ca1, 0(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a3, 4 -; HYBRID-NEXT: li a4, 2 -; HYBRID-NEXT: call __atomic_compare_exchange_cap_c@plt -; HYBRID-NEXT: lc ca1, 0(sp) -; HYBRID-NEXT: mv a2, a0 -; HYBRID-NEXT: cmove ca0, ca1 -; HYBRID-NEXT: mv a1, a2 -; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret - %1 = cmpxchg weak i32 addrspace(200)* addrspace(200)* %ptr, i32 addrspace(200)* %exp, i32 addrspace(200)* %new acq_rel acquire - ret { i32 addrspace(200)*, i1 } %1 -} From b47f510fbbc57f714144d6e9ec83385e8cc3da85 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Wed, 20 Sep 2023 18:41:55 -0700 Subject: [PATCH 04/18] [CHERI-Generic] Add a baseline test for cmpxchg exact This should use an exact capability comparison instead of an address compare. 
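The generated code for the new tests still performs an address-only
comparison for now (they carry TODO comments); the follow-up lowering
change will switch the LL/SC expansion to an exact capability
comparison. For reference, the flag is written after the optional
"weak" keyword, as in the tests added to the shared Inputs file below:

  %1 = cmpxchg exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire
  %1 = cmpxchg weak exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire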
--- .../CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll | 12 ++ .../CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll | 182 +++++++++++++----- .../CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll | 148 ++++++++++++-- .../CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll | 158 ++++++++++++--- 4 files changed, 405 insertions(+), 95 deletions(-) diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll index 9f9e544dc3dc..1485003aaf3d 100644 --- a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll @@ -33,6 +33,12 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) % ret { ptr addrspace(200) , i1 } %1 } +; TODO: this should use an exact equals comparison for the LL/SC +define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { + %1 = cmpxchg exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 +} + define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { %1 = cmpxchg weak ptr addrspace(200) %ptr, i8 %exp, i8 %new acq_rel acquire ret { i8, i1 } %1 @@ -58,3 +64,9 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ret { ptr addrspace(200) , i1 } %1 } +; TODO: this should use an exact equals comparison for the LL/SC +define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { + %1 = cmpxchg weak exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 +} + diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll index 0613e364669d..72bb2c432834 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll @@ -226,21 +226,62 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) % ret { ptr addrspace(200) , i1 } %1 } +; TODO: this should use an exact equals comparison for the LL/SC +define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { +; PURECAP-LABEL: test_cmpxchg_strong_cap_exact: +; PURECAP: # %bb.0: +; PURECAP-NEXT: sync +; PURECAP-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: cllc $c1, $c3 +; PURECAP-NEXT: ceq $1, $c1, $c4 +; PURECAP-NEXT: beqz $1, .LBB5_3 +; PURECAP-NEXT: nop +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; PURECAP-NEXT: cscc $1, $c5, $c3 +; PURECAP-NEXT: beqz $1, .LBB5_1 +; PURECAP-NEXT: nop +; PURECAP-NEXT: .LBB5_3: +; PURECAP-NEXT: ceq $2, $c1, $c4 +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cmove $c3, $c1 +; +; HYBRID-LABEL: test_cmpxchg_strong_cap_exact: +; HYBRID: # %bb.0: +; HYBRID-NEXT: sync +; HYBRID-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: cllc $c1, $c3 +; HYBRID-NEXT: ceq $1, $c1, $c4 +; HYBRID-NEXT: beqz $1, .LBB5_3 +; HYBRID-NEXT: nop +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; HYBRID-NEXT: cscc $1, $c5, $c3 +; HYBRID-NEXT: beqz $1, .LBB5_1 +; HYBRID-NEXT: nop +; HYBRID-NEXT: .LBB5_3: +; HYBRID-NEXT: ceq $2, $c1, $c4 +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; 
HYBRID-NEXT: cmove $c3, $c1 + %1 = cmpxchg exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 +} + define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_weak_i8: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sll $1, $5, 0 ; PURECAP-NEXT: sll $3, $4, 0 ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllb $2, $c3 -; PURECAP-NEXT: bne $2, $3, .LBB5_3 +; PURECAP-NEXT: bne $2, $3, .LBB6_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 ; PURECAP-NEXT: cscb $4, $1, $c3 -; PURECAP-NEXT: beqz $4, .LBB5_1 +; PURECAP-NEXT: beqz $4, .LBB6_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB5_3: +; PURECAP-NEXT: .LBB6_3: ; PURECAP-NEXT: sll $1, $3, 24 ; PURECAP-NEXT: sra $1, $1, 24 ; PURECAP-NEXT: xor $1, $2, $1 @@ -254,15 +295,15 @@ define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %ne ; HYBRID-NEXT: sll $1, $5, 0 ; HYBRID-NEXT: sll $3, $4, 0 ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllb $2, $c3 -; HYBRID-NEXT: bne $2, $3, .LBB5_3 +; HYBRID-NEXT: bne $2, $3, .LBB6_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 ; HYBRID-NEXT: cscb $4, $1, $c3 -; HYBRID-NEXT: beqz $4, .LBB5_1 +; HYBRID-NEXT: beqz $4, .LBB6_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB5_3: +; HYBRID-NEXT: .LBB6_3: ; HYBRID-NEXT: sll $1, $3, 24 ; HYBRID-NEXT: sra $1, $1, 24 ; HYBRID-NEXT: xor $1, $2, $1 @@ -280,15 +321,15 @@ define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 ; PURECAP-NEXT: sll $1, $5, 0 ; PURECAP-NEXT: sll $3, $4, 0 ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllh $2, $c3 -; PURECAP-NEXT: bne $2, $3, .LBB6_3 +; PURECAP-NEXT: bne $2, $3, .LBB7_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 ; PURECAP-NEXT: csch $4, $1, $c3 -; PURECAP-NEXT: beqz $4, .LBB6_1 +; PURECAP-NEXT: beqz $4, .LBB7_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB6_3: +; PURECAP-NEXT: .LBB7_3: ; PURECAP-NEXT: sll $1, $3, 16 ; PURECAP-NEXT: sra $1, $1, 16 ; PURECAP-NEXT: xor $1, $2, $1 @@ -302,15 +343,15 @@ define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 ; HYBRID-NEXT: sll $1, $5, 0 ; HYBRID-NEXT: sll $3, $4, 0 ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllh $2, $c3 -; HYBRID-NEXT: bne $2, $3, .LBB6_3 +; HYBRID-NEXT: bne $2, $3, .LBB7_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 ; HYBRID-NEXT: csch $4, $1, $c3 -; HYBRID-NEXT: beqz $4, .LBB6_1 +; HYBRID-NEXT: beqz $4, .LBB7_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB6_3: +; HYBRID-NEXT: .LBB7_3: ; HYBRID-NEXT: sll $1, $3, 16 ; HYBRID-NEXT: sra $1, $1, 16 ; HYBRID-NEXT: xor $1, $2, $1 @@ -328,15 +369,15 @@ define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 ; 
PURECAP-NEXT: sll $1, $5, 0 ; PURECAP-NEXT: sll $3, $4, 0 ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllw $2, $c3 -; PURECAP-NEXT: bne $2, $3, .LBB7_3 +; PURECAP-NEXT: bne $2, $3, .LBB8_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 ; PURECAP-NEXT: cscw $4, $1, $c3 -; PURECAP-NEXT: beqz $4, .LBB7_1 +; PURECAP-NEXT: beqz $4, .LBB8_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB7_3: +; PURECAP-NEXT: .LBB8_3: ; PURECAP-NEXT: xor $1, $2, $3 ; PURECAP-NEXT: sltiu $3, $1, 1 ; PURECAP-NEXT: sync @@ -348,15 +389,15 @@ define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 ; HYBRID-NEXT: sll $1, $5, 0 ; HYBRID-NEXT: sll $3, $4, 0 ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllw $2, $c3 -; HYBRID-NEXT: bne $2, $3, .LBB7_3 +; HYBRID-NEXT: bne $2, $3, .LBB8_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 ; HYBRID-NEXT: cscw $4, $1, $c3 -; HYBRID-NEXT: beqz $4, .LBB7_1 +; HYBRID-NEXT: beqz $4, .LBB8_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB7_3: +; HYBRID-NEXT: .LBB8_3: ; HYBRID-NEXT: xor $1, $2, $3 ; HYBRID-NEXT: sltiu $3, $1, 1 ; HYBRID-NEXT: sync @@ -370,15 +411,15 @@ define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 ; PURECAP-LABEL: test_cmpxchg_weak_i64: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: clld $2, $c3 -; PURECAP-NEXT: bne $2, $4, .LBB8_3 +; PURECAP-NEXT: bne $2, $4, .LBB9_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 ; PURECAP-NEXT: cscd $1, $5, $c3 -; PURECAP-NEXT: beqz $1, .LBB8_1 +; PURECAP-NEXT: beqz $1, .LBB9_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB8_3: +; PURECAP-NEXT: .LBB9_3: ; PURECAP-NEXT: xor $1, $2, $4 ; PURECAP-NEXT: sltiu $3, $1, 1 ; PURECAP-NEXT: sync @@ -388,15 +429,15 @@ define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 ; HYBRID-LABEL: test_cmpxchg_weak_i64: ; HYBRID: # %bb.0: ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: clld $2, $c3 -; HYBRID-NEXT: bne $2, $4, .LBB8_3 +; HYBRID-NEXT: bne $2, $4, .LBB9_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 ; HYBRID-NEXT: cscd $1, $5, $c3 -; HYBRID-NEXT: beqz $1, .LBB8_1 +; HYBRID-NEXT: beqz $1, .LBB9_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB8_3: +; HYBRID-NEXT: .LBB9_3: ; HYBRID-NEXT: xor $1, $2, $4 ; HYBRID-NEXT: sltiu $3, $1, 1 ; HYBRID-NEXT: sync @@ -410,16 +451,16 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ; PURECAP-LABEL: test_cmpxchg_weak_cap: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sync -; PURECAP-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllc $c1, $c3 ; PURECAP-NEXT: ceq $1, $c1, $c4 -; PURECAP-NEXT: beqz $1, .LBB9_3 +; PURECAP-NEXT: beqz $1, .LBB10_3 ; PURECAP-NEXT: nop -; PURECAP-NEXT: # 
%bb.2: # in Loop: Header=BB9_1 Depth=1 +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 ; PURECAP-NEXT: cscc $1, $c5, $c3 -; PURECAP-NEXT: beqz $1, .LBB9_1 +; PURECAP-NEXT: beqz $1, .LBB10_1 ; PURECAP-NEXT: nop -; PURECAP-NEXT: .LBB9_3: +; PURECAP-NEXT: .LBB10_3: ; PURECAP-NEXT: ceq $2, $c1, $c4 ; PURECAP-NEXT: sync ; PURECAP-NEXT: cjr $c17 @@ -428,16 +469,16 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ; HYBRID-LABEL: test_cmpxchg_weak_cap: ; HYBRID: # %bb.0: ; HYBRID-NEXT: sync -; HYBRID-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllc $c1, $c3 ; HYBRID-NEXT: ceq $1, $c1, $c4 -; HYBRID-NEXT: beqz $1, .LBB9_3 +; HYBRID-NEXT: beqz $1, .LBB10_3 ; HYBRID-NEXT: nop -; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 ; HYBRID-NEXT: cscc $1, $c5, $c3 -; HYBRID-NEXT: beqz $1, .LBB9_1 +; HYBRID-NEXT: beqz $1, .LBB10_1 ; HYBRID-NEXT: nop -; HYBRID-NEXT: .LBB9_3: +; HYBRID-NEXT: .LBB10_3: ; HYBRID-NEXT: ceq $2, $c1, $c4 ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra @@ -446,3 +487,44 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ret { ptr addrspace(200) , i1 } %1 } +; TODO: this should use an exact equals comparison for the LL/SC +define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { +; PURECAP-LABEL: test_cmpxchg_weak_cap_exact: +; PURECAP: # %bb.0: +; PURECAP-NEXT: sync +; PURECAP-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: cllc $c1, $c3 +; PURECAP-NEXT: ceq $1, $c1, $c4 +; PURECAP-NEXT: beqz $1, .LBB11_3 +; PURECAP-NEXT: nop +; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 +; PURECAP-NEXT: cscc $1, $c5, $c3 +; PURECAP-NEXT: beqz $1, .LBB11_1 +; PURECAP-NEXT: nop +; PURECAP-NEXT: .LBB11_3: +; PURECAP-NEXT: ceq $2, $c1, $c4 +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cmove $c3, $c1 +; +; HYBRID-LABEL: test_cmpxchg_weak_cap_exact: +; HYBRID: # %bb.0: +; HYBRID-NEXT: sync +; HYBRID-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: cllc $c1, $c3 +; HYBRID-NEXT: ceq $1, $c1, $c4 +; HYBRID-NEXT: beqz $1, .LBB11_3 +; HYBRID-NEXT: nop +; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 +; HYBRID-NEXT: cscc $1, $c5, $c3 +; HYBRID-NEXT: beqz $1, .LBB11_1 +; HYBRID-NEXT: nop +; HYBRID-NEXT: .LBB11_3: +; HYBRID-NEXT: ceq $2, $c1, $c4 +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: cmove $c3, $c1 + %1 = cmpxchg weak exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 +} + diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll index cd17f5e4f43f..35f7852fb5e9 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll @@ -285,18 +285,72 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) % ret { ptr addrspace(200) , i1 } %1 } +; TODO: this should use an exact equals comparison for the LL/SC +define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { +; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_cap_exact: +; PURECAP-ATOMICS: # %bb.0: +; 
PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 +; PURECAP-ATOMICS-NEXT: .LBB5_3: +; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 +; PURECAP-ATOMICS-NEXT: seqz a1, a0 +; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: test_cmpxchg_strong_cap_exact: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 8 +; PURECAP-LIBCALLS-NEXT: li a3, 4 +; PURECAP-LIBCALLS-NEXT: li a4, 2 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap +; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: mv a2, a0 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ca1 +; PURECAP-LIBCALLS-NEXT: mv a1, a2 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-LABEL: test_cmpxchg_strong_cap_exact: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sc ca1, 0(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a3, 4 +; HYBRID-NEXT: li a4, 2 +; HYBRID-NEXT: call __atomic_compare_exchange_cap_c@plt +; HYBRID-NEXT: lc ca1, 0(sp) +; HYBRID-NEXT: mv a2, a0 +; HYBRID-NEXT: cmove ca0, ca1 +; HYBRID-NEXT: mv a1, a2 +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret + %1 = cmpxchg exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 +} + define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i8: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 24 ; PURECAP-ATOMICS-NEXT: srai a1, a1, 24 -; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.b.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB6_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.b.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 -; PURECAP-ATOMICS-NEXT: .LBB5_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB6_1 +; PURECAP-ATOMICS-NEXT: .LBB6_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -345,13 +399,13 @@ define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 16 ; PURECAP-ATOMICS-NEXT: srai a1, a1, 16 -; PURECAP-ATOMICS-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.h.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB6_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB7_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 ; 
PURECAP-ATOMICS-NEXT: csc.h.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB6_1 -; PURECAP-ATOMICS-NEXT: .LBB6_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB7_1 +; PURECAP-ATOMICS-NEXT: .LBB7_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -398,13 +452,13 @@ define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i32: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.w.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB7_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB8_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.w.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB7_1 -; PURECAP-ATOMICS-NEXT: .LBB7_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB8_1 +; PURECAP-ATOMICS-NEXT: .LBB8_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -512,13 +566,13 @@ define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB9_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB10_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB9_1 -; PURECAP-ATOMICS-NEXT: .LBB9_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB10_1 +; PURECAP-ATOMICS-NEXT: .LBB10_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 @@ -562,3 +616,57 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ret { ptr addrspace(200) , i1 } %1 } +; TODO: this should use an exact equals comparison for the LL/SC +define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { +; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap_exact: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB11_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB11_1 +; PURECAP-ATOMICS-NEXT: .LBB11_3: +; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 +; PURECAP-ATOMICS-NEXT: seqz a1, a0 +; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: test_cmpxchg_weak_cap_exact: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) +; 
PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 8 +; PURECAP-LIBCALLS-NEXT: li a3, 4 +; PURECAP-LIBCALLS-NEXT: li a4, 2 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap +; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: mv a2, a0 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ca1 +; PURECAP-LIBCALLS-NEXT: mv a1, a2 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-LABEL: test_cmpxchg_weak_cap_exact: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sc ca1, 0(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a3, 4 +; HYBRID-NEXT: li a4, 2 +; HYBRID-NEXT: call __atomic_compare_exchange_cap_c@plt +; HYBRID-NEXT: lc ca1, 0(sp) +; HYBRID-NEXT: mv a2, a0 +; HYBRID-NEXT: cmove ca0, ca1 +; HYBRID-NEXT: mv a1, a2 +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret + %1 = cmpxchg weak exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 +} + diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll index 5639d60836d9..6eb6cc9b366e 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll @@ -278,18 +278,72 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) % ret { ptr addrspace(200) , i1 } %1 } +; TODO: this should use an exact equals comparison for the LL/SC +define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { +; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_cap_exact: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 +; PURECAP-ATOMICS-NEXT: .LBB5_3: +; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 +; PURECAP-ATOMICS-NEXT: seqz a1, a0 +; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: test_cmpxchg_strong_cap_exact: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 16 +; PURECAP-LIBCALLS-NEXT: li a3, 4 +; PURECAP-LIBCALLS-NEXT: li a4, 2 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap +; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: mv a2, a0 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ca1 +; PURECAP-LIBCALLS-NEXT: mv a1, a2 +; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-LABEL: test_cmpxchg_strong_cap_exact: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sc ca1, 0(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a3, 4 +; HYBRID-NEXT: li a4, 2 +; HYBRID-NEXT: call 
__atomic_compare_exchange_cap_c@plt +; HYBRID-NEXT: lc ca1, 0(sp) +; HYBRID-NEXT: mv a2, a0 +; HYBRID-NEXT: cmove ca0, ca1 +; HYBRID-NEXT: mv a1, a2 +; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret + %1 = cmpxchg exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 +} + define { i8, i1 } @test_cmpxchg_weak_i8(ptr addrspace(200) %ptr, i8 %exp, i8 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i8: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 56 ; PURECAP-ATOMICS-NEXT: srai a1, a1, 56 -; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.b.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB6_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.b.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 -; PURECAP-ATOMICS-NEXT: .LBB5_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB6_1 +; PURECAP-ATOMICS-NEXT: .LBB6_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -338,13 +392,13 @@ define { i16, i1 } @test_cmpxchg_weak_i16(ptr addrspace(200) %ptr, i16 %exp, i16 ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: slli a1, a1, 48 ; PURECAP-ATOMICS-NEXT: srai a1, a1, 48 -; PURECAP-ATOMICS-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.h.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB6_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB7_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.h.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB6_1 -; PURECAP-ATOMICS-NEXT: .LBB6_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB7_1 +; PURECAP-ATOMICS-NEXT: .LBB7_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -392,13 +446,13 @@ define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i32: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: sext.w a1, a1 -; PURECAP-ATOMICS-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.w.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB7_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB8_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.w.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB7_1 -; PURECAP-ATOMICS-NEXT: .LBB7_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB8_1 +; PURECAP-ATOMICS-NEXT: .LBB8_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -445,13 +499,13 @@ define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i64: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB8_1: # =>This Inner Loop 
Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.d.aq a3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB8_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB9_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.d.rl a4, a2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB8_1 -; PURECAP-ATOMICS-NEXT: .LBB8_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB9_1 +; PURECAP-ATOMICS-NEXT: .LBB9_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: mv a0, a3 @@ -498,13 +552,13 @@ define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB9_3 -; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB10_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a4, .LBB9_1 -; PURECAP-ATOMICS-NEXT: .LBB9_3: +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB10_1 +; PURECAP-ATOMICS-NEXT: .LBB10_3: ; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 ; PURECAP-ATOMICS-NEXT: seqz a1, a0 ; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 @@ -548,3 +602,57 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ret { ptr addrspace(200) , i1 } %1 } +; TODO: this should use an exact equals comparison for the LL/SC +define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { +; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap_exact: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) +; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB11_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a4, .LBB11_1 +; PURECAP-ATOMICS-NEXT: .LBB11_3: +; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 +; PURECAP-ATOMICS-NEXT: seqz a1, a0 +; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: test_cmpxchg_weak_cap_exact: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca1, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca1, 16 +; PURECAP-LIBCALLS-NEXT: li a3, 4 +; PURECAP-LIBCALLS-NEXT: li a4, 2 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_cap +; PURECAP-LIBCALLS-NEXT: clc ca1, 0(csp) +; PURECAP-LIBCALLS-NEXT: mv a2, a0 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ca1 +; PURECAP-LIBCALLS-NEXT: mv a1, a2 +; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-LABEL: test_cmpxchg_weak_cap_exact: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sd 
ra, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sc ca1, 0(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a3, 4 +; HYBRID-NEXT: li a4, 2 +; HYBRID-NEXT: call __atomic_compare_exchange_cap_c@plt +; HYBRID-NEXT: lc ca1, 0(sp) +; HYBRID-NEXT: mv a2, a0 +; HYBRID-NEXT: cmove ca0, ca1 +; HYBRID-NEXT: mv a1, a2 +; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret + %1 = cmpxchg weak exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire + ret { ptr addrspace(200) , i1 } %1 +} + From bf0993f7de9ce7e9526b23b565a025500a10f19f Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Wed, 20 Sep 2023 19:10:31 -0700 Subject: [PATCH 05/18] [CHERI] Correctly lower cmpxchg with the exact flag If the exact flag is set on the cmpxchg instruction, we use an exact comparison in the LL/SC expansion. This will allow use of capability cmpxchg for capability-sized integers (since we have to compare all bits in that case) and will also make it possible to correctly handle the exact-equals semantics in the future. --- llvm/include/llvm/CodeGen/TargetLowering.h | 6 ++++++ llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 11 +++++++++-- llvm/lib/Target/Mips/MipsExpandPseudo.cpp | 5 ++++- llvm/lib/Target/Mips/MipsISelLowering.cpp | 9 +++++++++ llvm/lib/Target/Mips/MipsISelLowering.h | 2 ++ .../RISCV/RISCVExpandAtomicPseudoInsts.cpp | 19 +++++++++++++++---- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 9 +++++++++ llvm/lib/Target/RISCV/RISCVISelLowering.h | 2 ++ .../CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll | 2 -- .../CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll | 18 ++++++++---------- .../CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll | 14 ++++++-------- .../CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll | 14 ++++++-------- 12 files changed, 76 insertions(+), 35 deletions(-) diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index b1f4fc3b979c..efaaf45dc447 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -3117,6 +3117,12 @@ class TargetLoweringBase { } // Return true if the target has a capability set address instruction. 
+ virtual SDValue getCapabilityEqualExact(const SDLoc &DL, SDValue LHS, + SDValue RHS, + SelectionDAG &DAG) const { + llvm_unreachable("Not implemented for this target"); + return SDValue(); + } virtual bool hasCapabilitySetAddress() const { return false; } MVT cheriCapabilityType() const { return CapType; } bool cheriCapabilityTypeHasPreciseBounds() const { diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index d790fce4e5f5..0c7e59d34e76 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -2876,8 +2876,15 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { } } - SDValue Success = - DAG.getSetCC(dl, Node->getValueType(1), LHS, RHS, ISD::SETEQ); + SDValue Success; + if (OuterType.isFatPointer() && + cast(Node)->getMemOperand()->isExactCompare()) { + Success = TLI.getCapabilityEqualExact(dl, LHS, RHS, DAG); + if (Success.getValueType() != Node->getValueType(1)) + Success = DAG.getZExtOrTrunc(Success, dl, Node->getValueType(1)); + } else { + Success = DAG.getSetCC(dl, Node->getValueType(1), LHS, RHS, ISD::SETEQ); + } Results.push_back(ExtRes.getValue(0)); Results.push_back(Success); diff --git a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp index d54d117a5eb7..5c2168d9843a 100644 --- a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp +++ b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp @@ -327,7 +327,10 @@ bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB, if (!IsCapOp) LLOp.addImm(0); if (IsCapCmpXchg) { - unsigned CapCmp = STI->useCheriExactEquals() ? Mips::CEXEQ : Mips::CEQ; + assert(I->hasOneMemOperand()); + bool UseExactEquals = + STI->useCheriExactEquals() || I->memoperands()[0]->isExactCompare(); + unsigned CapCmp = UseExactEquals ? Mips::CEXEQ : Mips::CEQ; // load, compare, and exit if not equal // cllc dest, ptr // ceq scratch, dest, oldval, diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index e5a4e6a0d427..d557e5ee9acb 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -1639,6 +1639,15 @@ void MipsTargetLowering::computeKnownBitsForTargetNode( } } +SDValue +MipsTargetLowering::getCapabilityEqualExact(const SDLoc &DL, llvm::SDValue LHS, + llvm::SDValue RHS, + llvm::SelectionDAG &DAG) const { + SDValue Res = DAG.getNode(MipsISD::CapEqualExact, DL, MVT::i64, LHS, RHS); + return DAG.getNode(ISD::AssertZext, DL, MVT::i64, Res, + DAG.getValueType(MVT::i1)); +} + TailPaddingAmount MipsTargetLowering::getTailPaddingForPreciseBounds(uint64_t Size) const { if (!Subtarget.isCheri()) diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h index 654f2b2952c2..dab8c6795813 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.h +++ b/llvm/lib/Target/Mips/MipsISelLowering.h @@ -404,6 +404,8 @@ extern bool LargeCapTable; // Although we don't currently have a CSetAddr, our CheriExpandIntrinsics // pass handles the intrinsic so we want to keep the intrinsic as-is. 
+ SDValue getCapabilityEqualExact(const SDLoc &DL, SDValue LHS, SDValue RHS, + SelectionDAG &DAG) const override; bool hasCapabilitySetAddress() const override { return true; } TailPaddingAmount diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp index 0e52026b829a..d19dc6cd6916 100644 --- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp @@ -1020,10 +1020,21 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg( BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(PtrIsCap, Ordering, VT)), DestReg) .addReg(AddrReg); - BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE)) - .addReg(DestIntReg, 0) - .addReg(CmpValIntReg, 0) - .addMBB(DoneMBB); + assert(MI.hasOneMemOperand()); + if (VT.isFatPointer() && MI.memoperands()[0]->isExactCompare()) { + BuildMI(LoopHeadMBB, DL, TII->get(RISCV::CSEQX), ScratchReg) + .addReg(DestReg, 0) + .addReg(CmpValReg, 0); + BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BEQ)) + .addReg(ScratchReg, 0) + .addReg(RISCV::X0, 0) + .addMBB(DoneMBB); + } else { + BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE)) + .addReg(DestIntReg, 0) + .addReg(CmpValIntReg, 0) + .addMBB(DoneMBB); + } // .looptail: // sc.[w|d] scratch, newval, (addr) // bnez scratch, loophead diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 8b5588a5515c..e0238f8a48d5 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -10121,6 +10121,15 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( return 1; } +SDValue +RISCVTargetLowering::getCapabilityEqualExact(const SDLoc &DL, llvm::SDValue LHS, + llvm::SDValue RHS, + llvm::SelectionDAG &DAG) const { + MVT VT = Subtarget.getXLenVT(); + SDValue Res = DAG.getNode(RISCVISD::CAP_EQUAL_EXACT, DL, VT, LHS, RHS); + return DAG.getNode(ISD::AssertZext, DL, VT, Res, DAG.getValueType(MVT::i1)); +} + TailPaddingAmount RISCVTargetLowering::getTailPaddingForPreciseBounds(uint64_t Size) const { if (!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index a36c89d59681..240c50faee3f 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -730,6 +730,8 @@ class RISCVTargetLowering : public TargetLowering { SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const; SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const; + SDValue getCapabilityEqualExact(const SDLoc &DL, SDValue LHS, SDValue RHS, + SelectionDAG &DAG) const override; bool hasCapabilitySetAddress() const override { return true; } TailPaddingAmount diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll index 1485003aaf3d..77491cc088bc 100644 --- a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-cap-ptr.ll @@ -33,7 +33,6 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) % ret { ptr addrspace(200) , i1 } %1 } -; TODO: this should use an exact equals comparison for the LL/SC define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { %1 = cmpxchg exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel 
acquire ret { ptr addrspace(200) , i1 } %1 @@ -64,7 +63,6 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ret { ptr addrspace(200) , i1 } %1 } -; TODO: this should use an exact equals comparison for the LL/SC define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { %1 = cmpxchg weak exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new acq_rel acquire ret { ptr addrspace(200) , i1 } %1 diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll index 72bb2c432834..498dcc461594 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-cap-ptr.ll @@ -226,14 +226,13 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) % ret { ptr addrspace(200) , i1 } %1 } -; TODO: this should use an exact equals comparison for the LL/SC define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_strong_cap_exact: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sync ; PURECAP-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllc $c1, $c3 -; PURECAP-NEXT: ceq $1, $c1, $c4 +; PURECAP-NEXT: cexeq $1, $c1, $c4 ; PURECAP-NEXT: beqz $1, .LBB5_3 ; PURECAP-NEXT: nop ; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 @@ -241,7 +240,7 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace( ; PURECAP-NEXT: beqz $1, .LBB5_1 ; PURECAP-NEXT: nop ; PURECAP-NEXT: .LBB5_3: -; PURECAP-NEXT: ceq $2, $c1, $c4 +; PURECAP-NEXT: cexeq $2, $c1, $c4 ; PURECAP-NEXT: sync ; PURECAP-NEXT: cjr $c17 ; PURECAP-NEXT: cmove $c3, $c1 @@ -251,7 +250,7 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace( ; HYBRID-NEXT: sync ; HYBRID-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllc $c1, $c3 -; HYBRID-NEXT: ceq $1, $c1, $c4 +; HYBRID-NEXT: cexeq $1, $c1, $c4 ; HYBRID-NEXT: beqz $1, .LBB5_3 ; HYBRID-NEXT: nop ; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 @@ -259,7 +258,7 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace( ; HYBRID-NEXT: beqz $1, .LBB5_1 ; HYBRID-NEXT: nop ; HYBRID-NEXT: .LBB5_3: -; HYBRID-NEXT: ceq $2, $c1, $c4 +; HYBRID-NEXT: cexeq $2, $c1, $c4 ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cmove $c3, $c1 @@ -487,14 +486,13 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ret { ptr addrspace(200) , i1 } %1 } -; TODO: this should use an exact equals comparison for the LL/SC define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-LABEL: test_cmpxchg_weak_cap_exact: ; PURECAP: # %bb.0: ; PURECAP-NEXT: sync ; PURECAP-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-NEXT: cllc $c1, $c3 -; PURECAP-NEXT: ceq $1, $c1, $c4 +; PURECAP-NEXT: cexeq $1, $c1, $c4 ; PURECAP-NEXT: beqz $1, .LBB11_3 ; PURECAP-NEXT: nop ; PURECAP-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 @@ -502,7 +500,7 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(20 ; PURECAP-NEXT: beqz $1, .LBB11_1 ; PURECAP-NEXT: nop ; PURECAP-NEXT: .LBB11_3: -; PURECAP-NEXT: ceq $2, $c1, $c4 +; PURECAP-NEXT: cexeq $2, $c1, $c4 ; 
PURECAP-NEXT: sync ; PURECAP-NEXT: cjr $c17 ; PURECAP-NEXT: cmove $c3, $c1 @@ -512,7 +510,7 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(20 ; HYBRID-NEXT: sync ; HYBRID-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; HYBRID-NEXT: cllc $c1, $c3 -; HYBRID-NEXT: ceq $1, $c1, $c4 +; HYBRID-NEXT: cexeq $1, $c1, $c4 ; HYBRID-NEXT: beqz $1, .LBB11_3 ; HYBRID-NEXT: nop ; HYBRID-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 @@ -520,7 +518,7 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(20 ; HYBRID-NEXT: beqz $1, .LBB11_1 ; HYBRID-NEXT: nop ; HYBRID-NEXT: .LBB11_3: -; HYBRID-NEXT: ceq $2, $c1, $c4 +; HYBRID-NEXT: cexeq $2, $c1, $c4 ; HYBRID-NEXT: sync ; HYBRID-NEXT: jr $ra ; HYBRID-NEXT: cmove $c3, $c1 diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll index 35f7852fb5e9..cd7e07361f17 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll @@ -285,19 +285,18 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) % ret { ptr addrspace(200) , i1 } %1 } -; TODO: this should use an exact equals comparison for the LL/SC define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_cap_exact: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 +; PURECAP-ATOMICS-NEXT: cseqx a4, ca3, ca1 +; PURECAP-ATOMICS-NEXT: beqz a4, .LBB5_3 ; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) ; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 ; PURECAP-ATOMICS-NEXT: .LBB5_3: -; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 -; PURECAP-ATOMICS-NEXT: seqz a1, a0 +; PURECAP-ATOMICS-NEXT: cseqx a1, ca3, ca1 ; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 ; PURECAP-ATOMICS-NEXT: cret ; @@ -616,19 +615,18 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ret { ptr addrspace(200) , i1 } %1 } -; TODO: this should use an exact equals comparison for the LL/SC define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap_exact: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB11_3 +; PURECAP-ATOMICS-NEXT: cseqx a4, ca3, ca1 +; PURECAP-ATOMICS-NEXT: beqz a4, .LBB11_3 ; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) ; PURECAP-ATOMICS-NEXT: bnez a4, .LBB11_1 ; PURECAP-ATOMICS-NEXT: .LBB11_3: -; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 -; PURECAP-ATOMICS-NEXT: seqz a1, a0 +; PURECAP-ATOMICS-NEXT: cseqx a1, ca3, ca1 ; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 ; PURECAP-ATOMICS-NEXT: cret ; diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll index 6eb6cc9b366e..333f1a382b87 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-cap-ptr.ll @@ -278,19 +278,18 @@ define { ptr 
addrspace(200) , i1 } @test_cmpxchg_strong_cap(ptr addrspace(200) % ret { ptr addrspace(200) , i1 } %1 } -; TODO: this should use an exact equals comparison for the LL/SC define { ptr addrspace(200) , i1 } @test_cmpxchg_strong_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_cap_exact: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB5_3 +; PURECAP-ATOMICS-NEXT: cseqx a4, ca3, ca1 +; PURECAP-ATOMICS-NEXT: beqz a4, .LBB5_3 ; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) ; PURECAP-ATOMICS-NEXT: bnez a4, .LBB5_1 ; PURECAP-ATOMICS-NEXT: .LBB5_3: -; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 -; PURECAP-ATOMICS-NEXT: seqz a1, a0 +; PURECAP-ATOMICS-NEXT: cseqx a1, ca3, ca1 ; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 ; PURECAP-ATOMICS-NEXT: cret ; @@ -602,19 +601,18 @@ define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap(ptr addrspace(200) %pt ret { ptr addrspace(200) , i1 } %1 } -; TODO: this should use an exact equals comparison for the LL/SC define { ptr addrspace(200) , i1 } @test_cmpxchg_weak_cap_exact(ptr addrspace(200) %ptr, ptr addrspace(200) %exp, ptr addrspace(200) %new) nounwind { ; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_cap_exact: ; PURECAP-ATOMICS: # %bb.0: ; PURECAP-ATOMICS-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; PURECAP-ATOMICS-NEXT: clr.c.aq ca3, (ca0) -; PURECAP-ATOMICS-NEXT: bne a3, a1, .LBB11_3 +; PURECAP-ATOMICS-NEXT: cseqx a4, ca3, ca1 +; PURECAP-ATOMICS-NEXT: beqz a4, .LBB11_3 ; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1 ; PURECAP-ATOMICS-NEXT: csc.c.aq a4, ca2, (ca0) ; PURECAP-ATOMICS-NEXT: bnez a4, .LBB11_1 ; PURECAP-ATOMICS-NEXT: .LBB11_3: -; PURECAP-ATOMICS-NEXT: xor a0, a3, a1 -; PURECAP-ATOMICS-NEXT: seqz a1, a0 +; PURECAP-ATOMICS-NEXT: cseqx a1, ca3, ca1 ; PURECAP-ATOMICS-NEXT: cmove ca0, ca3 ; PURECAP-ATOMICS-NEXT: cret ; From 67d9eb42678815d40b82d7a695feb0926bf4fa81 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Tue, 19 Sep 2023 13:46:36 -0700 Subject: [PATCH 06/18] [CHERI] Add a baseline test for atomics on capability-size integers In order to report true for __atomic_always_lock_free(sizeof(uintptr_t), 0) we will have to also expand i128/i64 atomics using capability ops. 
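As an illustrative sketch (not part of this commit), the user-visible goal is
that a query like the following evaluates to true once capability-sized
integer atomics are expanded inline via capability operations; today it is
false on these targets because i128 (i64 on 32-bit systems) atomics fall back
to libcalls. The function name is hypothetical; __atomic_always_lock_free is
the standard Clang/GCC builtin, and a null pointer argument queries the
guarantee for any suitably aligned object of the given size:

  #include <stdbool.h>
  #include <stdint.h>

  /* Hypothetical example: expected to return true on CHERI targets once
     this expansion lands, since uintptr_t is capability-sized there. */
  bool uintptr_atomics_always_lock_free(void) {
    return __atomic_always_lock_free(sizeof(uintptr_t), 0);
  }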
--- .../Inputs/atomic-cap-size-int.ll | 91 + .../CHERI-Generic/MIPS/atomic-cap-size-int.ll | 1208 +++++++++++++ .../RISCV32/atomic-cap-size-int.ll | 1517 +++++++++++++++++ .../RISCV64/atomic-cap-size-int.ll | 1517 +++++++++++++++++ 4 files changed, 4333 insertions(+) create mode 100644 llvm/test/CodeGen/CHERI-Generic/Inputs/atomic-cap-size-int.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-cap-size-int.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/atomic-cap-size-int.ll new file mode 100644 index 000000000000..d6bfb5f22acb --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/atomic-cap-size-int.ll @@ -0,0 +1,91 @@ +;; Check that we can atomically update i128 (i64 for 32-bit systems) +;; For systems without double-width atomics (RISC-V, MIPS) we can use capability atomics +;; This is needed so we can report true for __atomic_always_lock_free(sizeof(uintptr_t), 0) +@IF-RISCV@; RUN: opt -data-layout="@PURECAP_DATALAYOUT@" @PURECAP_HARDFLOAT_ARGS@ -atomic-expand -S -mattr=+a < %s | FileCheck %s --check-prefix=PURECAP-IR +@IF-RISCV@; RUN: opt -data-layout="@HYBRID_DATALAYOUT@" @HYBRID_HARDFLOAT_ARGS@ -atomic-expand -S -mattr=+a < %s | FileCheck %s --check-prefix=HYBRID-IR +@IFNOT-RISCV@; RUN: opt @PURECAP_HARDFLOAT_ARGS@ -atomic-expand -S < %s | FileCheck %s --check-prefix=PURECAP-IR +@IFNOT-RISCV@; RUN: opt @HYBRID_HARDFLOAT_ARGS@ -atomic-expand -S < %s | FileCheck %s --check-prefix=HYBRID-IR +@IF-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ -mattr=+a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-ATOMICS --allow-unused-prefixes +@IF-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ -mattr=-a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-LIBCALLS --allow-unused-prefixes +@IFNOT-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ %s -o - | FileCheck %s --check-prefix=PURECAP +@IF-RISCV@; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc @HYBRID_HARDFLOAT_ARGS@ -mattr=+a | FileCheck %s --check-prefixes=HYBRID,HYBRID-ATOMICS --allow-unused-prefixes +@IF-RISCV@; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc @HYBRID_HARDFLOAT_ARGS@ -mattr=-a | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS --allow-unused-prefixes +@IFNOT-RISCV@; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc @HYBRID_HARDFLOAT_ARGS@ | FileCheck %s --check-prefix=HYBRID +@IF-RISCV@; RUN: llc @HYBRID_HARDFLOAT_ARGS@ -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID-CAP-PTR,HYBRID-CAP-PTR-ATOMICS --allow-unused-prefixes +@IF-RISCV@; RUN: llc @HYBRID_HARDFLOAT_ARGS@ -mattr=-a < %s | FileCheck %s --check-prefixes=HYBRID-CAP-PTR,HYBRID-CAP-PTR-LIBCALLS --allow-unused-prefixes +@IFNOT-RISCV@; RUN: llc @HYBRID_HARDFLOAT_ARGS@ %s -o - | FileCheck %s --check-prefix=HYBRID-CAP-PTR + +define i@CAP_BITS@ @store(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + store atomic i@CAP_BITS@ %val, ptr addrspace(200) %ptr seq_cst, align @CAP_BYTES@ + ret i@CAP_BITS@ %val +} + +define i@CAP_BITS@ @load(ptr addrspace(200) %ptr) nounwind { + %val = load atomic i@CAP_BITS@, ptr addrspace(200) %ptr seq_cst, align @CAP_BYTES@ + ret i@CAP_BITS@ %val +} + +define i@CAP_BITS@ @atomic_xchg(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw xchg ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_add(ptr 
addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw add ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_sub(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw sub ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_and(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw and ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_nand(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw nand ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_or(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw or ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_xor(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw xor ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_max(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw max ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_min(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw min ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_umax(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw umax ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define i@CAP_BITS@ @atomic_umin(ptr addrspace(200) %ptr, i@CAP_BITS@ %val) nounwind { + %tmp = atomicrmw umin ptr addrspace(200) %ptr, i@CAP_BITS@ %val seq_cst + ret i@CAP_BITS@ %tmp +} + +define { i@CAP_BITS@, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i@CAP_BITS@ %exp, i@CAP_BITS@ %new) nounwind { + %1 = cmpxchg weak ptr addrspace(200) %ptr, i@CAP_BITS@ %exp, i@CAP_BITS@ %new acq_rel acquire + ret { i@CAP_BITS@, i1 } %1 +} + +define { i@CAP_BITS@, i1 } @cmpxchg_strong(ptr addrspace(200) %ptr, i@CAP_BITS@ %exp, i@CAP_BITS@ %new) nounwind { + %1 = cmpxchg ptr addrspace(200) %ptr, i@CAP_BITS@ %exp, i@CAP_BITS@ %new seq_cst seq_cst + ret { i@CAP_BITS@, i1 } %1 +} diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-cap-size-int.ll new file mode 100644 index 000000000000..df1c2db8b78e --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-cap-size-int.ll @@ -0,0 +1,1208 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-cap-size-int.ll +;; Check that we can atomically update i128 (i64 for 32-bit systems) +;; For systems without double-width atomics (RISC-V, MIPS) we can use capability atomics +;; This is needed so we can report true for __atomic_always_lock_free(sizeof(uintptr_t), 0) +; RUN: opt -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap -atomic-expand -S < %s | FileCheck %s --check-prefix=PURECAP-IR +; RUN: opt -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 -atomic-expand -S < %s | FileCheck %s --check-prefix=HYBRID-IR +; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - | 
FileCheck %s --check-prefix=PURECAP +; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 | FileCheck %s --check-prefix=HYBRID +; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 %s -o - | FileCheck %s --check-prefix=HYBRID-CAP-PTR + +define i128 @store(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: store: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -32 +; PURECAP-NEXT: csd $17, $zero, 24($c11) # 8-byte Folded Spill +; PURECAP-NEXT: csd $16, $zero, 16($c11) # 8-byte Folded Spill +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: move $16, $5 +; PURECAP-NEXT: move $17, $4 +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_lock_test_and_set_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: move $2, $17 +; PURECAP-NEXT: move $3, $16 +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cld $16, $zero, 16($c11) # 8-byte Folded Reload +; PURECAP-NEXT: cld $17, $zero, 24($c11) # 8-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 32 +; +; HYBRID-LABEL: store: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -32 +; HYBRID-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 16($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: move $16, $6 +; HYBRID-NEXT: move $17, $5 +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(store))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(store))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_lock_test_and_set_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp0, R_MIPS_JALR, __sync_lock_test_and_set_16 +; HYBRID-NEXT: .Ltmp0: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: move $2, $17 +; HYBRID-NEXT: move $3, $16 +; HYBRID-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $gp, 16($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 32 +; +; HYBRID-CAP-PTR-LABEL: store: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -32 +; HYBRID-CAP-PTR-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 16($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: move $16, $5 +; HYBRID-CAP-PTR-NEXT: move $17, $4 +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(store))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(store))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_lock_test_and_set_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp0, R_MIPS_JALR, __sync_lock_test_and_set_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp0: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: move $2, $17 +; HYBRID-CAP-PTR-NEXT: move $3, $16 +; HYBRID-CAP-PTR-NEXT: ld $16, 0($sp) # 8-byte 
Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $gp, 16($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 32 +; PURECAP-IR-LABEL: define {{[^@]+}}@store +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: store atomic i128 [[VAL]], ptr addrspace(200) [[PTR]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[VAL]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@store +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: store atomic i128 [[VAL]], ptr addrspace(200) [[PTR]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[VAL]] +; + store atomic i128 %val, ptr addrspace(200) %ptr seq_cst, align 16 + ret i128 %val +} + +define i128 @load(ptr addrspace(200) %ptr) nounwind { +; PURECAP-LABEL: load: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_val_compare_and_swap_16)($c1) +; PURECAP-NEXT: daddiu $4, $zero, 0 +; PURECAP-NEXT: daddiu $5, $zero, 0 +; PURECAP-NEXT: daddiu $6, $zero, 0 +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: daddiu $7, $zero, 0 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: load: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(load))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(load))) +; HYBRID-NEXT: ld $25, %call16(__sync_val_compare_and_swap_16)($gp) +; HYBRID-NEXT: daddiu $5, $zero, 0 +; HYBRID-NEXT: daddiu $6, $zero, 0 +; HYBRID-NEXT: daddiu $7, $zero, 0 +; HYBRID-NEXT: .reloc .Ltmp1, R_MIPS_JALR, __sync_val_compare_and_swap_16 +; HYBRID-NEXT: .Ltmp1: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: daddiu $8, $zero, 0 +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: load: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(load))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(load))) +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_val_compare_and_swap_16)($gp) +; HYBRID-CAP-PTR-NEXT: daddiu $4, $zero, 0 +; HYBRID-CAP-PTR-NEXT: daddiu $5, $zero, 0 +; HYBRID-CAP-PTR-NEXT: daddiu $6, $zero, 0 +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp1, R_MIPS_JALR, __sync_val_compare_and_swap_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp1: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: daddiu $7, $zero, 0 +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 
8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@load +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[VAL:%.*]] = load atomic i128, ptr addrspace(200) [[PTR]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[VAL]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@load +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[VAL:%.*]] = load atomic i128, ptr addrspace(200) [[PTR]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[VAL]] +; + %val = load atomic i128, ptr addrspace(200) %ptr seq_cst, align 16 + ret i128 %val +} + +define i128 @atomic_xchg(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_xchg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_lock_test_and_set_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_xchg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_xchg))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_xchg))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_lock_test_and_set_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp2, R_MIPS_JALR, __sync_lock_test_and_set_16 +; HYBRID-NEXT: .Ltmp2: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_xchg: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_xchg))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_xchg))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_lock_test_and_set_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp2, R_MIPS_JALR, __sync_lock_test_and_set_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp2: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xchg +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw xchg ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 
[[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xchg +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw xchg ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw xchg ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_add(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_add: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_add_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_add: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_add))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_add))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_add_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp3, R_MIPS_JALR, __sync_fetch_and_add_16 +; HYBRID-NEXT: .Ltmp3: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_add: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_add))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_add))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_add_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp3, R_MIPS_JALR, __sync_fetch_and_add_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp3: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_add +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw add ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_add +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw add ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw add ptr addrspace(200) 
%ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_sub(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_sub: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_sub_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_sub: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_sub))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_sub))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_sub_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp4, R_MIPS_JALR, __sync_fetch_and_sub_16 +; HYBRID-NEXT: .Ltmp4: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_sub: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_sub))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_sub))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_sub_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp4, R_MIPS_JALR, __sync_fetch_and_sub_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp4: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_sub +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw sub ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_sub +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw sub ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw sub ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_and(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_and: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, 
%pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_and_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_and: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_and))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_and))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_and_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp5, R_MIPS_JALR, __sync_fetch_and_and_16 +; HYBRID-NEXT: .Ltmp5: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_and: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_and))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_and))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_and_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp5, R_MIPS_JALR, __sync_fetch_and_and_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp5: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_and +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw and ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_and +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw and ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw and ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_nand(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_nand: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_nand_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; 
HYBRID-LABEL: atomic_nand: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_nand))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_nand))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_nand_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp6, R_MIPS_JALR, __sync_fetch_and_nand_16 +; HYBRID-NEXT: .Ltmp6: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_nand: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_nand))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_nand))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_nand_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp6, R_MIPS_JALR, __sync_fetch_and_nand_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp6: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_nand +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw nand ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_nand +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw nand ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw nand ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_or(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_or: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_or_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_or: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_or))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_or))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, 
%call16(__sync_fetch_and_or_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp7, R_MIPS_JALR, __sync_fetch_and_or_16 +; HYBRID-NEXT: .Ltmp7: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_or: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_or))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_or))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_or_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp7, R_MIPS_JALR, __sync_fetch_and_or_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp7: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_or +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw or ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_or +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw or ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw or ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_xor(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_xor: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_xor_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_xor: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_xor))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_xor))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_xor_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp8, R_MIPS_JALR, __sync_fetch_and_xor_16 +; HYBRID-NEXT: .Ltmp8: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_xor: +; 
HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_xor))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_xor))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_xor_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp8, R_MIPS_JALR, __sync_fetch_and_xor_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp8: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xor +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw xor ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xor +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw xor ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw xor ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_max(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_max: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_max_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_max: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_max))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_max))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_max_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp9, R_MIPS_JALR, __sync_fetch_and_max_16 +; HYBRID-NEXT: .Ltmp9: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_max: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_max))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_max))) +; HYBRID-CAP-PTR-NEXT: sync +; 
HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_max_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp9, R_MIPS_JALR, __sync_fetch_and_max_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp9: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_max +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw max ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_max +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw max ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw max ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_min(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_min: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_min_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_min: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_min))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_min))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_min_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp10, R_MIPS_JALR, __sync_fetch_and_min_16 +; HYBRID-NEXT: .Ltmp10: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_min: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_min))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_min))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_min_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp10, R_MIPS_JALR, __sync_fetch_and_min_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp10: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; 
HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_min +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw min ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_min +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw min ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw min ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_umax(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_umax: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_umax_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_umax: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_umax))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_umax))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_umax_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp11, R_MIPS_JALR, __sync_fetch_and_umax_16 +; HYBRID-NEXT: .Ltmp11: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_umax: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_umax))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_umax))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_umax_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp11, R_MIPS_JALR, __sync_fetch_and_umax_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp11: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umax +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw umax ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst 
+; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_umax +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw umax ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 [[TMP]] +; + %tmp = atomicrmw umax ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_umin(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_umin: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -16 +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_fetch_and_umin_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 16 +; +; HYBRID-LABEL: atomic_umin: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -16 +; HYBRID-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_umin))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_umin))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_fetch_and_umin_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp12, R_MIPS_JALR, __sync_fetch_and_umin_16 +; HYBRID-NEXT: .Ltmp12: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 16 +; +; HYBRID-CAP-PTR-LABEL: atomic_umin: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -16 +; HYBRID-CAP-PTR-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(atomic_umin))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(atomic_umin))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_fetch_and_umin_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp12, R_MIPS_JALR, __sync_fetch_and_umin_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp12: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $gp, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 16 +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umin +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP:%.*]] = atomicrmw umin ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret i128 [[TMP]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_umin +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP:%.*]] = atomicrmw umin ptr addrspace(200) [[PTR]], i128 [[VAL]] monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret i128 
[[TMP]] +; + %tmp = atomicrmw umin ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define { i128, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i128 %exp, i128 %new) nounwind { +; PURECAP-LABEL: cmpxchg_weak: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -32 +; PURECAP-NEXT: csd $17, $zero, 24($c11) # 8-byte Folded Spill +; PURECAP-NEXT: csd $16, $zero, 16($c11) # 8-byte Folded Spill +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: move $16, $5 +; PURECAP-NEXT: move $17, $4 +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_val_compare_and_swap_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: xor $1, $2, $17 +; PURECAP-NEXT: xor $4, $3, $16 +; PURECAP-NEXT: or $1, $4, $1 +; PURECAP-NEXT: sltiu $4, $1, 1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cld $16, $zero, 16($c11) # 8-byte Folded Reload +; PURECAP-NEXT: cld $17, $zero, 24($c11) # 8-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 32 +; +; HYBRID-LABEL: cmpxchg_weak: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -32 +; HYBRID-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 16($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: move $16, $6 +; HYBRID-NEXT: move $17, $5 +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(cmpxchg_weak))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(cmpxchg_weak))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_val_compare_and_swap_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp13, R_MIPS_JALR, __sync_val_compare_and_swap_16 +; HYBRID-NEXT: .Ltmp13: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: xor $1, $2, $17 +; HYBRID-NEXT: xor $4, $3, $16 +; HYBRID-NEXT: or $1, $4, $1 +; HYBRID-NEXT: sltiu $4, $1, 1 +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $gp, 16($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 32 +; +; HYBRID-CAP-PTR-LABEL: cmpxchg_weak: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -32 +; HYBRID-CAP-PTR-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 16($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: move $16, $5 +; HYBRID-CAP-PTR-NEXT: move $17, $4 +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(cmpxchg_weak))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(cmpxchg_weak))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_val_compare_and_swap_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp13, R_MIPS_JALR, __sync_val_compare_and_swap_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp13: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: xor $1, $2, $17 +; HYBRID-CAP-PTR-NEXT: xor $4, $3, $16 +; HYBRID-CAP-PTR-NEXT: or $1, $4, $1 +; HYBRID-CAP-PTR-NEXT: sltiu $4, $1, 1 +; HYBRID-CAP-PTR-NEXT: sync 
+; HYBRID-CAP-PTR-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $gp, 16($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 32 +; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_weak +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence acq_rel +; PURECAP-IR-NEXT: [[TMP1:%.*]] = cmpxchg weak ptr addrspace(200) [[PTR]], i128 [[EXP]], i128 [[NEW]] monotonic monotonic, align 16 +; PURECAP-IR-NEXT: fence acq_rel +; PURECAP-IR-NEXT: ret { i128, i1 } [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_weak +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence acq_rel +; HYBRID-IR-NEXT: [[TMP1:%.*]] = cmpxchg weak ptr addrspace(200) [[PTR]], i128 [[EXP]], i128 [[NEW]] monotonic monotonic, align 16 +; HYBRID-IR-NEXT: fence acq_rel +; HYBRID-IR-NEXT: ret { i128, i1 } [[TMP1]] +; + %1 = cmpxchg weak ptr addrspace(200) %ptr, i128 %exp, i128 %new acq_rel acquire + ret { i128, i1 } %1 +} + +define { i128, i1 } @cmpxchg_strong(ptr addrspace(200) %ptr, i128 %exp, i128 %new) nounwind { +; PURECAP-LABEL: cmpxchg_strong: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset $c11, $c11, -32 +; PURECAP-NEXT: csd $17, $zero, 24($c11) # 8-byte Folded Spill +; PURECAP-NEXT: csd $16, $zero, 16($c11) # 8-byte Folded Spill +; PURECAP-NEXT: csc $c17, $zero, 0($c11) # 16-byte Folded Spill +; PURECAP-NEXT: move $16, $5 +; PURECAP-NEXT: move $17, $4 +; PURECAP-NEXT: lui $1, %pcrel_hi(_CHERI_CAPABILITY_TABLE_-8) +; PURECAP-NEXT: daddiu $1, $1, %pcrel_lo(_CHERI_CAPABILITY_TABLE_-4) +; PURECAP-NEXT: cgetpccincoffset $c1, $1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clcbi $c12, %capcall20(__sync_val_compare_and_swap_16)($c1) +; PURECAP-NEXT: cjalr $c12, $c17 +; PURECAP-NEXT: nop +; PURECAP-NEXT: xor $1, $2, $17 +; PURECAP-NEXT: xor $4, $3, $16 +; PURECAP-NEXT: or $1, $4, $1 +; PURECAP-NEXT: sltiu $4, $1, 1 +; PURECAP-NEXT: sync +; PURECAP-NEXT: clc $c17, $zero, 0($c11) # 16-byte Folded Reload +; PURECAP-NEXT: cld $16, $zero, 16($c11) # 8-byte Folded Reload +; PURECAP-NEXT: cld $17, $zero, 24($c11) # 8-byte Folded Reload +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cincoffset $c11, $c11, 32 +; +; HYBRID-LABEL: cmpxchg_strong: +; HYBRID: # %bb.0: +; HYBRID-NEXT: daddiu $sp, $sp, -32 +; HYBRID-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $gp, 16($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; HYBRID-NEXT: move $16, $6 +; HYBRID-NEXT: move $17, $5 +; HYBRID-NEXT: lui $1, %hi(%neg(%gp_rel(cmpxchg_strong))) +; HYBRID-NEXT: daddu $1, $1, $25 +; HYBRID-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(cmpxchg_strong))) +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $25, %call16(__sync_val_compare_and_swap_16)($gp) +; HYBRID-NEXT: .reloc .Ltmp14, R_MIPS_JALR, __sync_val_compare_and_swap_16 +; HYBRID-NEXT: .Ltmp14: +; HYBRID-NEXT: jalr $25 +; HYBRID-NEXT: nop +; HYBRID-NEXT: xor $1, $2, $17 +; HYBRID-NEXT: xor $4, $3, $16 +; HYBRID-NEXT: or $1, $4, $1 +; HYBRID-NEXT: sltiu $4, $1, 1 +; HYBRID-NEXT: sync +; HYBRID-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $gp, 16($sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload 
+; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: daddiu $sp, $sp, 32 +; +; HYBRID-CAP-PTR-LABEL: cmpxchg_strong: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, -32 +; HYBRID-CAP-PTR-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $gp, 16($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: move $16, $5 +; HYBRID-CAP-PTR-NEXT: move $17, $4 +; HYBRID-CAP-PTR-NEXT: lui $1, %hi(%neg(%gp_rel(cmpxchg_strong))) +; HYBRID-CAP-PTR-NEXT: daddu $1, $1, $25 +; HYBRID-CAP-PTR-NEXT: daddiu $gp, $1, %lo(%neg(%gp_rel(cmpxchg_strong))) +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $25, %call16(__sync_val_compare_and_swap_16)($gp) +; HYBRID-CAP-PTR-NEXT: .reloc .Ltmp14, R_MIPS_JALR, __sync_val_compare_and_swap_16 +; HYBRID-CAP-PTR-NEXT: .Ltmp14: +; HYBRID-CAP-PTR-NEXT: jalr $25 +; HYBRID-CAP-PTR-NEXT: nop +; HYBRID-CAP-PTR-NEXT: xor $1, $2, $17 +; HYBRID-CAP-PTR-NEXT: xor $4, $3, $16 +; HYBRID-CAP-PTR-NEXT: or $1, $4, $1 +; HYBRID-CAP-PTR-NEXT: sltiu $4, $1, 1 +; HYBRID-CAP-PTR-NEXT: sync +; HYBRID-CAP-PTR-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $gp, 16($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: jr $ra +; HYBRID-CAP-PTR-NEXT: daddiu $sp, $sp, 32 +; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_strong +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) #[[ATTR0]] { +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP1:%.*]] = cmpxchg ptr addrspace(200) [[PTR]], i128 [[EXP]], i128 [[NEW]] monotonic monotonic, align 16 +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: ret { i128, i1 } [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_strong +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP1:%.*]] = cmpxchg ptr addrspace(200) [[PTR]], i128 [[EXP]], i128 [[NEW]] monotonic monotonic, align 16 +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: ret { i128, i1 } [[TMP1]] +; + %1 = cmpxchg ptr addrspace(200) %ptr, i128 %exp, i128 %new seq_cst seq_cst + ret { i128, i1 } %1 +} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll new file mode 100644 index 000000000000..f80c30bef993 --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll @@ -0,0 +1,1517 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-cap-size-int.ll +;; Check that we can atomically update i128 (i64 for 32-bit systems) +;; For systems without double-width atomics (RISC-V, MIPS) we can use capability atomics +;; This is needed so we can report true for __atomic_always_lock_free(sizeof(uintptr_t), 0) +; RUN: opt -data-layout="e-m:e-pf200:64:64:64:32-p:32:32-i64:64-n32-S128-A200-P200-G200" -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -atomic-expand -S -mattr=+a < %s | FileCheck %s --check-prefix=PURECAP-IR +; RUN: opt -data-layout="e-m:e-pf200:64:64:64:32-p:32:32-i64:64-n32-S128" -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f 
-mattr=+xcheri,+f -atomic-expand -S -mattr=+a < %s | FileCheck %s --check-prefix=HYBRID-IR +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=+a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-ATOMICS --allow-unused-prefixes +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=-a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-LIBCALLS --allow-unused-prefixes +; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=+a | FileCheck %s --check-prefixes=HYBRID,HYBRID-ATOMICS --allow-unused-prefixes +; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=-a | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS --allow-unused-prefixes +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID-CAP-PTR,HYBRID-CAP-PTR-ATOMICS --allow-unused-prefixes +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=-a < %s | FileCheck %s --check-prefixes=HYBRID-CAP-PTR,HYBRID-CAP-PTR-LIBCALLS --allow-unused-prefixes + +define i64 @store(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: store: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -32 +; PURECAP-NEXT: csc cra, 24(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs1, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: mv s0, a2 +; PURECAP-NEXT: mv s1, a1 +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_store_8 +; PURECAP-NEXT: mv a0, s1 +; PURECAP-NEXT: mv a1, s0 +; PURECAP-NEXT: clc cra, 24(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs1, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 32 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: store: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; HYBRID-NEXT: mv s0, a2 +; HYBRID-NEXT: mv s1, a1 +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_store_8@plt +; HYBRID-NEXT: mv a0, s1 +; HYBRID-NEXT: mv a1, s0 +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: store: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_store_8_c@plt +; HYBRID-CAP-PTR-NEXT: mv a0, s1 +; HYBRID-CAP-PTR-NEXT: mv a1, s0 +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@store +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) 
#[[ATTR0:[0-9]+]] { +; PURECAP-IR-NEXT: call void @__atomic_store_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i64 [[VAL]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@store +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { +; HYBRID-IR-NEXT: call void @__atomic_store_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[VAL]] +; + store atomic i64 %val, ptr addrspace(200) %ptr seq_cst, align 8 + ret i64 %val +} + +define i64 @load(ptr addrspace(200) %ptr) nounwind { +; PURECAP-LABEL: load: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: li a1, 5 +; PURECAP-NEXT: ccall __atomic_load_8 +; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: load: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: li a1, 5 +; HYBRID-NEXT: call __atomic_load_8@plt +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: load: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a1, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_load_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@load +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_load_8(ptr addrspace(200) [[PTR]], i32 5) +; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@load +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_load_8_c(ptr addrspace(200) [[PTR]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; + %val = load atomic i64, ptr addrspace(200) %ptr seq_cst, align 8 + ret i64 %val +} + +define i64 @atomic_xchg(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_xchg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_exchange_8 +; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_xchg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_exchange_8@plt +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_xchg: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_exchange_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xchg +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_exchange_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i64 
[[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xchg +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_exchange_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; + %tmp = atomicrmw xchg ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_add(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_add: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_add_8 +; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_add: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_add_8@plt +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_add: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_add_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_add +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_add_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_add +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_add_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; + %tmp = atomicrmw add ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_sub(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_sub: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_sub_8 +; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_sub: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_sub_8@plt +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_sub: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_sub_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_sub +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_sub_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i64 
[[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_sub +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_sub_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; + %tmp = atomicrmw sub ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_and(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_and: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_and_8 +; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_and: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_and_8@plt +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_and: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_and_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_and +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_and_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_and +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_and_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; + %tmp = atomicrmw and ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_nand(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_nand: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_nand_8 +; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_nand: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_nand_8@plt +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_nand: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_nand_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_nand +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_nand_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: 
ret i64 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_nand +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_nand_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; + %tmp = atomicrmw nand ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_or(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_or: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_or_8 +; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_or: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_or_8@plt +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_or: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_or_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_or +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_or_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_or +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_or_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; + %tmp = atomicrmw or ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_xor(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_xor: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_xor_8 +; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_xor: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_xor_8@plt +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_xor: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_xor_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xor +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_xor_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i64 
[[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xor +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_xor_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; + %tmp = atomicrmw xor ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_max(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_max: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -48 +; PURECAP-NEXT: csc cra, 40(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: cmove cs3, ca0 +; PURECAP-NEXT: clw a5, 4(ca0) +; PURECAP-NEXT: clw a4, 0(ca0) +; PURECAP-NEXT: mv s1, a2 +; PURECAP-NEXT: mv s2, a1 +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds cs0, ca0, 8 +; PURECAP-NEXT: j .LBB9_2 +; PURECAP-NEXT: .LBB9_1: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: csw a4, 0(csp) +; PURECAP-NEXT: csw a5, 4(csp) +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, cs3 +; PURECAP-NEXT: cmove ca1, cs0 +; PURECAP-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-NEXT: clw a5, 4(csp) +; PURECAP-NEXT: clw a4, 0(csp) +; PURECAP-NEXT: bnez a0, .LBB9_7 +; PURECAP-NEXT: .LBB9_2: # %atomicrmw.start +; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: beq a5, s1, .LBB9_4 +; PURECAP-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: slt a0, s1, a5 +; PURECAP-NEXT: j .LBB9_5 +; PURECAP-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: sltu a0, s2, a4 +; PURECAP-NEXT: .LBB9_5: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: mv a2, a4 +; PURECAP-NEXT: mv a3, a5 +; PURECAP-NEXT: bnez a0, .LBB9_1 +; PURECAP-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: mv a2, s2 +; PURECAP-NEXT: mv a3, s1 +; PURECAP-NEXT: j .LBB9_1 +; PURECAP-NEXT: .LBB9_7: # %atomicrmw.end +; PURECAP-NEXT: mv a0, a4 +; PURECAP-NEXT: mv a1, a5 +; PURECAP-NEXT: clc cra, 40(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 48 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_max: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: lw a5, 4(a0) +; HYBRID-NEXT: lw a4, 0(a0) +; HYBRID-NEXT: mv s1, a2 +; HYBRID-NEXT: mv s2, a1 +; HYBRID-NEXT: j .LBB9_2 +; HYBRID-NEXT: .LBB9_1: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: sw a4, 8(sp) +; HYBRID-NEXT: sw a5, 12(sp) +; HYBRID-NEXT: addi a1, sp, 8 +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, s0 +; HYBRID-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-NEXT: lw a5, 12(sp) +; HYBRID-NEXT: lw a4, 8(sp) +; HYBRID-NEXT: bnez a0, .LBB9_7 +; 
HYBRID-NEXT: .LBB9_2: # %atomicrmw.start +; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: beq a5, s1, .LBB9_4 +; HYBRID-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: slt a0, s1, a5 +; HYBRID-NEXT: j .LBB9_5 +; HYBRID-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: sltu a0, s2, a4 +; HYBRID-NEXT: .LBB9_5: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: mv a2, a4 +; HYBRID-NEXT: mv a3, a5 +; HYBRID-NEXT: bnez a0, .LBB9_1 +; HYBRID-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: mv a2, s2 +; HYBRID-NEXT: mv a3, s1 +; HYBRID-NEXT: j .LBB9_1 +; HYBRID-NEXT: .LBB9_7: # %atomicrmw.end +; HYBRID-NEXT: mv a0, a4 +; HYBRID-NEXT: mv a1, a5 +; HYBRID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_max: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -32 +; HYBRID-CAP-PTR-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lw.cap a4, (ca0) +; HYBRID-CAP-PTR-NEXT: sc ca0, 0(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: cincoffset ca0, ca0, 4 +; HYBRID-CAP-PTR-NEXT: lw.cap a5, (ca0) +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: j .LBB9_2 +; HYBRID-CAP-PTR-NEXT: .LBB9_1: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sw a4, 8(sp) +; HYBRID-CAP-PTR-NEXT: sw a5, 12(sp) +; HYBRID-CAP-PTR-NEXT: addi a1, sp, 8 +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: lc ca0, 0(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw a5, 12(sp) +; HYBRID-CAP-PTR-NEXT: lw a4, 8(sp) +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB9_7 +; HYBRID-CAP-PTR-NEXT: .LBB9_2: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-CAP-PTR-NEXT: beq a5, s0, .LBB9_4 +; HYBRID-CAP-PTR-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: slt a0, s0, a5 +; HYBRID-CAP-PTR-NEXT: j .LBB9_5 +; HYBRID-CAP-PTR-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s1, a4 +; HYBRID-CAP-PTR-NEXT: .LBB9_5: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, a4 +; HYBRID-CAP-PTR-NEXT: mv a3, a5 +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB9_1 +; HYBRID-CAP-PTR-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, s1 +; HYBRID-CAP-PTR-NEXT: mv a3, s0 +; HYBRID-CAP-PTR-NEXT: j .LBB9_1 +; HYBRID-CAP-PTR-NEXT: .LBB9_7: # %atomicrmw.end +; HYBRID-CAP-PTR-NEXT: mv a0, a4 +; HYBRID-CAP-PTR-NEXT: mv a1, a5 +; HYBRID-CAP-PTR-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 32 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_max +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) 
#[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) +; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp sgt i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i64 [[LOADED]], ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_max +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; HYBRID-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; HYBRID-IR: atomicrmw.start: +; HYBRID-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; HYBRID-IR-NEXT: [[TMP3:%.*]] = icmp sgt i64 [[LOADED]], [[VAL]] +; HYBRID-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i64 [[LOADED]], ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 +; HYBRID-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 +; HYBRID-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; HYBRID-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; HYBRID-IR: atomicrmw.end: +; HYBRID-IR-NEXT: ret i64 [[NEWLOADED]] +; + %tmp = atomicrmw max ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_min(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_min: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -48 +; PURECAP-NEXT: csc cra, 40(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; 
PURECAP-NEXT: cmove cs3, ca0 +; PURECAP-NEXT: clw a5, 4(ca0) +; PURECAP-NEXT: clw a4, 0(ca0) +; PURECAP-NEXT: mv s1, a2 +; PURECAP-NEXT: mv s2, a1 +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds cs0, ca0, 8 +; PURECAP-NEXT: j .LBB10_2 +; PURECAP-NEXT: .LBB10_1: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: csw a4, 0(csp) +; PURECAP-NEXT: csw a5, 4(csp) +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, cs3 +; PURECAP-NEXT: cmove ca1, cs0 +; PURECAP-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-NEXT: clw a5, 4(csp) +; PURECAP-NEXT: clw a4, 0(csp) +; PURECAP-NEXT: bnez a0, .LBB10_7 +; PURECAP-NEXT: .LBB10_2: # %atomicrmw.start +; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: beq a5, s1, .LBB10_4 +; PURECAP-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: slt a0, s1, a5 +; PURECAP-NEXT: j .LBB10_5 +; PURECAP-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: sltu a0, s2, a4 +; PURECAP-NEXT: .LBB10_5: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: xori a0, a0, 1 +; PURECAP-NEXT: mv a2, a4 +; PURECAP-NEXT: mv a3, a5 +; PURECAP-NEXT: bnez a0, .LBB10_1 +; PURECAP-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: mv a2, s2 +; PURECAP-NEXT: mv a3, s1 +; PURECAP-NEXT: j .LBB10_1 +; PURECAP-NEXT: .LBB10_7: # %atomicrmw.end +; PURECAP-NEXT: mv a0, a4 +; PURECAP-NEXT: mv a1, a5 +; PURECAP-NEXT: clc cra, 40(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 48 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_min: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: lw a5, 4(a0) +; HYBRID-NEXT: lw a4, 0(a0) +; HYBRID-NEXT: mv s1, a2 +; HYBRID-NEXT: mv s2, a1 +; HYBRID-NEXT: j .LBB10_2 +; HYBRID-NEXT: .LBB10_1: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: sw a4, 8(sp) +; HYBRID-NEXT: sw a5, 12(sp) +; HYBRID-NEXT: addi a1, sp, 8 +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, s0 +; HYBRID-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-NEXT: lw a5, 12(sp) +; HYBRID-NEXT: lw a4, 8(sp) +; HYBRID-NEXT: bnez a0, .LBB10_7 +; HYBRID-NEXT: .LBB10_2: # %atomicrmw.start +; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: beq a5, s1, .LBB10_4 +; HYBRID-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: slt a0, s1, a5 +; HYBRID-NEXT: j .LBB10_5 +; HYBRID-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: sltu a0, s2, a4 +; HYBRID-NEXT: .LBB10_5: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: xori a0, a0, 1 +; HYBRID-NEXT: mv a2, a4 +; HYBRID-NEXT: mv a3, a5 +; HYBRID-NEXT: bnez a0, .LBB10_1 +; HYBRID-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: mv a2, s2 +; HYBRID-NEXT: mv a3, s1 +; HYBRID-NEXT: j .LBB10_1 +; HYBRID-NEXT: .LBB10_7: # 
%atomicrmw.end +; HYBRID-NEXT: mv a0, a4 +; HYBRID-NEXT: mv a1, a5 +; HYBRID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_min: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -32 +; HYBRID-CAP-PTR-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lw.cap a4, (ca0) +; HYBRID-CAP-PTR-NEXT: sc ca0, 0(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: cincoffset ca0, ca0, 4 +; HYBRID-CAP-PTR-NEXT: lw.cap a5, (ca0) +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: j .LBB10_2 +; HYBRID-CAP-PTR-NEXT: .LBB10_1: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sw a4, 8(sp) +; HYBRID-CAP-PTR-NEXT: sw a5, 12(sp) +; HYBRID-CAP-PTR-NEXT: addi a1, sp, 8 +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: lc ca0, 0(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw a5, 12(sp) +; HYBRID-CAP-PTR-NEXT: lw a4, 8(sp) +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB10_7 +; HYBRID-CAP-PTR-NEXT: .LBB10_2: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-CAP-PTR-NEXT: beq a5, s0, .LBB10_4 +; HYBRID-CAP-PTR-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: slt a0, s0, a5 +; HYBRID-CAP-PTR-NEXT: j .LBB10_5 +; HYBRID-CAP-PTR-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s1, a4 +; HYBRID-CAP-PTR-NEXT: .LBB10_5: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: xori a0, a0, 1 +; HYBRID-CAP-PTR-NEXT: mv a2, a4 +; HYBRID-CAP-PTR-NEXT: mv a3, a5 +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB10_1 +; HYBRID-CAP-PTR-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, s1 +; HYBRID-CAP-PTR-NEXT: mv a3, s0 +; HYBRID-CAP-PTR-NEXT: j .LBB10_1 +; HYBRID-CAP-PTR-NEXT: .LBB10_7: # %atomicrmw.end +; HYBRID-CAP-PTR-NEXT: mv a0, a4 +; HYBRID-CAP-PTR-NEXT: mv a1, a5 +; HYBRID-CAP-PTR-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 32 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_min +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) +; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp sle i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i64 [[LOADED]], ptr addrspace(200) [[TMP1]], align 8 
+; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_min +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; HYBRID-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; HYBRID-IR: atomicrmw.start: +; HYBRID-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; HYBRID-IR-NEXT: [[TMP3:%.*]] = icmp sle i64 [[LOADED]], [[VAL]] +; HYBRID-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i64 [[LOADED]], ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 +; HYBRID-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 +; HYBRID-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; HYBRID-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; HYBRID-IR: atomicrmw.end: +; HYBRID-IR-NEXT: ret i64 [[NEWLOADED]] +; + %tmp = atomicrmw min ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_umax(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_umax: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -48 +; PURECAP-NEXT: csc cra, 40(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: cmove cs3, ca0 +; PURECAP-NEXT: clw a5, 4(ca0) +; PURECAP-NEXT: clw a4, 0(ca0) +; PURECAP-NEXT: mv s1, a2 +; PURECAP-NEXT: mv s2, a1 +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds cs0, ca0, 8 +; PURECAP-NEXT: j .LBB11_2 +; PURECAP-NEXT: .LBB11_1: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: csw a4, 0(csp) +; PURECAP-NEXT: csw a5, 4(csp) +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, cs3 +; PURECAP-NEXT: cmove ca1, cs0 +; PURECAP-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-NEXT: clw a5, 4(csp) +; PURECAP-NEXT: clw a4, 0(csp) +; PURECAP-NEXT: bnez a0, .LBB11_7 +; 
PURECAP-NEXT: .LBB11_2: # %atomicrmw.start +; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: beq a5, s1, .LBB11_4 +; PURECAP-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: sltu a0, s1, a5 +; PURECAP-NEXT: j .LBB11_5 +; PURECAP-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: sltu a0, s2, a4 +; PURECAP-NEXT: .LBB11_5: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: mv a2, a4 +; PURECAP-NEXT: mv a3, a5 +; PURECAP-NEXT: bnez a0, .LBB11_1 +; PURECAP-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: mv a2, s2 +; PURECAP-NEXT: mv a3, s1 +; PURECAP-NEXT: j .LBB11_1 +; PURECAP-NEXT: .LBB11_7: # %atomicrmw.end +; PURECAP-NEXT: mv a0, a4 +; PURECAP-NEXT: mv a1, a5 +; PURECAP-NEXT: clc cra, 40(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 48 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_umax: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: lw a5, 4(a0) +; HYBRID-NEXT: lw a4, 0(a0) +; HYBRID-NEXT: mv s1, a2 +; HYBRID-NEXT: mv s2, a1 +; HYBRID-NEXT: j .LBB11_2 +; HYBRID-NEXT: .LBB11_1: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: sw a4, 8(sp) +; HYBRID-NEXT: sw a5, 12(sp) +; HYBRID-NEXT: addi a1, sp, 8 +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, s0 +; HYBRID-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-NEXT: lw a5, 12(sp) +; HYBRID-NEXT: lw a4, 8(sp) +; HYBRID-NEXT: bnez a0, .LBB11_7 +; HYBRID-NEXT: .LBB11_2: # %atomicrmw.start +; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: beq a5, s1, .LBB11_4 +; HYBRID-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: sltu a0, s1, a5 +; HYBRID-NEXT: j .LBB11_5 +; HYBRID-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: sltu a0, s2, a4 +; HYBRID-NEXT: .LBB11_5: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: mv a2, a4 +; HYBRID-NEXT: mv a3, a5 +; HYBRID-NEXT: bnez a0, .LBB11_1 +; HYBRID-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: mv a2, s2 +; HYBRID-NEXT: mv a3, s1 +; HYBRID-NEXT: j .LBB11_1 +; HYBRID-NEXT: .LBB11_7: # %atomicrmw.end +; HYBRID-NEXT: mv a0, a4 +; HYBRID-NEXT: mv a1, a5 +; HYBRID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_umax: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -32 +; HYBRID-CAP-PTR-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lw.cap a4, (ca0) +; HYBRID-CAP-PTR-NEXT: sc ca0, 0(sp) # 8-byte Folded Spill +; 
HYBRID-CAP-PTR-NEXT: cincoffset ca0, ca0, 4 +; HYBRID-CAP-PTR-NEXT: lw.cap a5, (ca0) +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: j .LBB11_2 +; HYBRID-CAP-PTR-NEXT: .LBB11_1: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sw a4, 8(sp) +; HYBRID-CAP-PTR-NEXT: sw a5, 12(sp) +; HYBRID-CAP-PTR-NEXT: addi a1, sp, 8 +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: lc ca0, 0(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw a5, 12(sp) +; HYBRID-CAP-PTR-NEXT: lw a4, 8(sp) +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB11_7 +; HYBRID-CAP-PTR-NEXT: .LBB11_2: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-CAP-PTR-NEXT: beq a5, s0, .LBB11_4 +; HYBRID-CAP-PTR-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s0, a5 +; HYBRID-CAP-PTR-NEXT: j .LBB11_5 +; HYBRID-CAP-PTR-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s1, a4 +; HYBRID-CAP-PTR-NEXT: .LBB11_5: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, a4 +; HYBRID-CAP-PTR-NEXT: mv a3, a5 +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB11_1 +; HYBRID-CAP-PTR-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, s1 +; HYBRID-CAP-PTR-NEXT: mv a3, s0 +; HYBRID-CAP-PTR-NEXT: j .LBB11_1 +; HYBRID-CAP-PTR-NEXT: .LBB11_7: # %atomicrmw.end +; HYBRID-CAP-PTR-NEXT: mv a0, a4 +; HYBRID-CAP-PTR-NEXT: mv a1, a5 +; HYBRID-CAP-PTR-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 32 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umax +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) +; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i64 [[LOADED]], ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: 
atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_umax +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; HYBRID-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; HYBRID-IR: atomicrmw.start: +; HYBRID-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; HYBRID-IR-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[LOADED]], [[VAL]] +; HYBRID-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i64 [[LOADED]], ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 +; HYBRID-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 +; HYBRID-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; HYBRID-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; HYBRID-IR: atomicrmw.end: +; HYBRID-IR-NEXT: ret i64 [[NEWLOADED]] +; + %tmp = atomicrmw umax ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define i64 @atomic_umin(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: atomic_umin: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -48 +; PURECAP-NEXT: csc cra, 40(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; PURECAP-NEXT: cmove cs3, ca0 +; PURECAP-NEXT: clw a5, 4(ca0) +; PURECAP-NEXT: clw a4, 0(ca0) +; PURECAP-NEXT: mv s1, a2 +; PURECAP-NEXT: mv s2, a1 +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds cs0, ca0, 8 +; PURECAP-NEXT: j .LBB12_2 +; PURECAP-NEXT: .LBB12_1: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: csw a4, 0(csp) +; PURECAP-NEXT: csw a5, 4(csp) +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, cs3 +; PURECAP-NEXT: cmove ca1, cs0 +; PURECAP-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-NEXT: clw a5, 4(csp) +; PURECAP-NEXT: clw a4, 0(csp) +; PURECAP-NEXT: bnez a0, .LBB12_7 +; PURECAP-NEXT: .LBB12_2: # %atomicrmw.start +; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: beq a5, s1, .LBB12_4 +; PURECAP-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: sltu a0, s1, a5 +; PURECAP-NEXT: j .LBB12_5 +; PURECAP-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: sltu a0, s2, a4 +; PURECAP-NEXT: .LBB12_5: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: xori a0, a0, 1 +; PURECAP-NEXT: mv a2, a4 +; PURECAP-NEXT: mv a3, a5 +; PURECAP-NEXT: bnez a0, .LBB12_1 +; PURECAP-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: mv a2, s2 +; PURECAP-NEXT: mv a3, s1 +; PURECAP-NEXT: 
j .LBB12_1 +; PURECAP-NEXT: .LBB12_7: # %atomicrmw.end +; PURECAP-NEXT: mv a0, a4 +; PURECAP-NEXT: mv a1, a5 +; PURECAP-NEXT: clc cra, 40(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 48 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_umin: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: lw a5, 4(a0) +; HYBRID-NEXT: lw a4, 0(a0) +; HYBRID-NEXT: mv s1, a2 +; HYBRID-NEXT: mv s2, a1 +; HYBRID-NEXT: j .LBB12_2 +; HYBRID-NEXT: .LBB12_1: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: sw a4, 8(sp) +; HYBRID-NEXT: sw a5, 12(sp) +; HYBRID-NEXT: addi a1, sp, 8 +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, s0 +; HYBRID-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-NEXT: lw a5, 12(sp) +; HYBRID-NEXT: lw a4, 8(sp) +; HYBRID-NEXT: bnez a0, .LBB12_7 +; HYBRID-NEXT: .LBB12_2: # %atomicrmw.start +; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: beq a5, s1, .LBB12_4 +; HYBRID-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: sltu a0, s1, a5 +; HYBRID-NEXT: j .LBB12_5 +; HYBRID-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: sltu a0, s2, a4 +; HYBRID-NEXT: .LBB12_5: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: xori a0, a0, 1 +; HYBRID-NEXT: mv a2, a4 +; HYBRID-NEXT: mv a3, a5 +; HYBRID-NEXT: bnez a0, .LBB12_1 +; HYBRID-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: mv a2, s2 +; HYBRID-NEXT: mv a3, s1 +; HYBRID-NEXT: j .LBB12_1 +; HYBRID-NEXT: .LBB12_7: # %atomicrmw.end +; HYBRID-NEXT: mv a0, a4 +; HYBRID-NEXT: mv a1, a5 +; HYBRID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_umin: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -32 +; HYBRID-CAP-PTR-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: lw.cap a4, (ca0) +; HYBRID-CAP-PTR-NEXT: sc ca0, 0(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: cincoffset ca0, ca0, 4 +; HYBRID-CAP-PTR-NEXT: lw.cap a5, (ca0) +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: j .LBB12_2 +; HYBRID-CAP-PTR-NEXT: .LBB12_1: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sw a4, 8(sp) +; HYBRID-CAP-PTR-NEXT: sw a5, 12(sp) +; HYBRID-CAP-PTR-NEXT: addi a1, sp, 8 +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: lc ca0, 0(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw a5, 12(sp) +; HYBRID-CAP-PTR-NEXT: lw a4, 8(sp) +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB12_7 +; 
HYBRID-CAP-PTR-NEXT: .LBB12_2: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-CAP-PTR-NEXT: beq a5, s0, .LBB12_4 +; HYBRID-CAP-PTR-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s0, a5 +; HYBRID-CAP-PTR-NEXT: j .LBB12_5 +; HYBRID-CAP-PTR-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s1, a4 +; HYBRID-CAP-PTR-NEXT: .LBB12_5: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: xori a0, a0, 1 +; HYBRID-CAP-PTR-NEXT: mv a2, a4 +; HYBRID-CAP-PTR-NEXT: mv a3, a5 +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB12_1 +; HYBRID-CAP-PTR-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, s1 +; HYBRID-CAP-PTR-NEXT: mv a3, s0 +; HYBRID-CAP-PTR-NEXT: j .LBB12_1 +; HYBRID-CAP-PTR-NEXT: .LBB12_7: # %atomicrmw.end +; HYBRID-CAP-PTR-NEXT: mv a0, a4 +; HYBRID-CAP-PTR-NEXT: mv a1, a5 +; HYBRID-CAP-PTR-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 32 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umin +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) +; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp ule i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i64 [[LOADED]], ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_umin +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; HYBRID-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; HYBRID-IR: atomicrmw.start: +; HYBRID-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; HYBRID-IR-NEXT: [[TMP3:%.*]] = icmp ule i64 [[LOADED]], [[VAL]] +; HYBRID-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] +; 
HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i64 [[LOADED]], ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 +; HYBRID-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 +; HYBRID-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; HYBRID-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; HYBRID-IR: atomicrmw.end: +; HYBRID-IR-NEXT: ret i64 [[NEWLOADED]] +; + %tmp = atomicrmw umin ptr addrspace(200) %ptr, i64 %val seq_cst + ret i64 %tmp +} + +define { i64, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { +; PURECAP-LABEL: cmpxchg_weak: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -32 +; PURECAP-NEXT: csc cra, 24(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill +; PURECAP-NEXT: mv a6, a5 +; PURECAP-NEXT: mv a7, a4 +; PURECAP-NEXT: cmove ct0, ca1 +; PURECAP-NEXT: cmove cs0, ca0 +; PURECAP-NEXT: csw a3, 12(csp) +; PURECAP-NEXT: csw a2, 8(csp) +; PURECAP-NEXT: cincoffset ca0, csp, 8 +; PURECAP-NEXT: csetbounds ca1, ca0, 8 +; PURECAP-NEXT: li a4, 4 +; PURECAP-NEXT: li a5, 2 +; PURECAP-NEXT: cmove ca0, ct0 +; PURECAP-NEXT: mv a2, a7 +; PURECAP-NEXT: mv a3, a6 +; PURECAP-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-NEXT: clw a1, 12(csp) +; PURECAP-NEXT: clw a2, 8(csp) +; PURECAP-NEXT: csw a1, 4(cs0) +; PURECAP-NEXT: csw a2, 0(cs0) +; PURECAP-NEXT: csb a0, 8(cs0) +; PURECAP-NEXT: clc cra, 24(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 32 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: cmpxchg_weak: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-NEXT: mv a6, a5 +; HYBRID-NEXT: mv a7, a4 +; HYBRID-NEXT: mv t0, a1 +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: sw a3, 4(sp) +; HYBRID-NEXT: sw a2, 0(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a4, 4 +; HYBRID-NEXT: li a5, 2 +; HYBRID-NEXT: mv a0, t0 +; HYBRID-NEXT: mv a2, a7 +; HYBRID-NEXT: mv a3, a6 +; HYBRID-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-NEXT: lw a1, 4(sp) +; HYBRID-NEXT: lw a2, 0(sp) +; HYBRID-NEXT: sw a1, 4(s0) +; HYBRID-NEXT: sw a2, 0(s0) +; HYBRID-NEXT: sb a0, 8(s0) +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: cmpxchg_weak: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: mv a6, a5 +; HYBRID-CAP-PTR-NEXT: mv a7, a4 +; HYBRID-CAP-PTR-NEXT: cmove ct0, ca1 +; HYBRID-CAP-PTR-NEXT: mv s0, a0 +; HYBRID-CAP-PTR-NEXT: sw a3, 4(sp) +; HYBRID-CAP-PTR-NEXT: sw a2, 0(sp) +; HYBRID-CAP-PTR-NEXT: mv a1, sp +; HYBRID-CAP-PTR-NEXT: li a4, 4 +; HYBRID-CAP-PTR-NEXT: li a5, 2 +; HYBRID-CAP-PTR-NEXT: cmove ca0, ct0 +; HYBRID-CAP-PTR-NEXT: mv a2, a7 +; 
HYBRID-CAP-PTR-NEXT: mv a3, a6 +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw a1, 4(sp) +; HYBRID-CAP-PTR-NEXT: lw a2, 0(sp) +; HYBRID-CAP-PTR-NEXT: sw a1, 4(s0) +; HYBRID-CAP-PTR-NEXT: sw a2, 0(s0) +; HYBRID-CAP-PTR-NEXT: sb a0, 8(s0) +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_weak +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[EXP:%.*]], i64 [[NEW:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i64 [[EXP]], ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 4, i32 2) +; PURECAP-IR-NEXT: [[TMP3:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP4:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP3]], 0 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = insertvalue { i64, i1 } [[TMP4]], i1 [[TMP2]], 1 +; PURECAP-IR-NEXT: ret { i64, i1 } [[TMP5]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_weak +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[EXP:%.*]], i64 [[NEW:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8 +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i64 [[EXP]], ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_8_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i64 [[NEW]], i32 4, i32 2) +; HYBRID-IR-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP4:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP3]], 0 +; HYBRID-IR-NEXT: [[TMP5:%.*]] = insertvalue { i64, i1 } [[TMP4]], i1 [[TMP2]], 1 +; HYBRID-IR-NEXT: ret { i64, i1 } [[TMP5]] +; + %1 = cmpxchg weak ptr addrspace(200) %ptr, i64 %exp, i64 %new acq_rel acquire + ret { i64, i1 } %1 +} + +define { i64, i1 } @cmpxchg_strong(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { +; PURECAP-LABEL: cmpxchg_strong: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -32 +; PURECAP-NEXT: csc cra, 24(csp) # 8-byte Folded Spill +; PURECAP-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill +; PURECAP-NEXT: mv a6, a5 +; PURECAP-NEXT: mv a7, a4 +; PURECAP-NEXT: cmove ct0, ca1 +; PURECAP-NEXT: cmove cs0, ca0 +; PURECAP-NEXT: csw a3, 12(csp) +; PURECAP-NEXT: csw a2, 8(csp) +; PURECAP-NEXT: cincoffset ca0, csp, 8 +; PURECAP-NEXT: csetbounds ca1, ca0, 8 +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, ct0 +; PURECAP-NEXT: mv a2, a7 +; PURECAP-NEXT: mv a3, a6 +; PURECAP-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-NEXT: clw a1, 12(csp) +; PURECAP-NEXT: clw a2, 8(csp) +; PURECAP-NEXT: csw a1, 4(cs0) +; PURECAP-NEXT: csw a2, 0(cs0) +; PURECAP-NEXT: csb a0, 8(cs0) +; PURECAP-NEXT: clc cra, 24(csp) # 8-byte Folded Reload +; PURECAP-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 32 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: cmpxchg_strong: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; 
HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-NEXT: mv a6, a5 +; HYBRID-NEXT: mv a7, a4 +; HYBRID-NEXT: mv t0, a1 +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: sw a3, 4(sp) +; HYBRID-NEXT: sw a2, 0(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, t0 +; HYBRID-NEXT: mv a2, a7 +; HYBRID-NEXT: mv a3, a6 +; HYBRID-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-NEXT: lw a1, 4(sp) +; HYBRID-NEXT: lw a2, 0(sp) +; HYBRID-NEXT: sw a1, 4(s0) +; HYBRID-NEXT: sw a2, 0(s0) +; HYBRID-NEXT: sb a0, 8(s0) +; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: cmpxchg_strong: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: mv a6, a5 +; HYBRID-CAP-PTR-NEXT: mv a7, a4 +; HYBRID-CAP-PTR-NEXT: cmove ct0, ca1 +; HYBRID-CAP-PTR-NEXT: mv s0, a0 +; HYBRID-CAP-PTR-NEXT: sw a3, 4(sp) +; HYBRID-CAP-PTR-NEXT: sw a2, 0(sp) +; HYBRID-CAP-PTR-NEXT: mv a1, sp +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: cmove ca0, ct0 +; HYBRID-CAP-PTR-NEXT: mv a2, a7 +; HYBRID-CAP-PTR-NEXT: mv a3, a6 +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_8_c@plt +; HYBRID-CAP-PTR-NEXT: lw a1, 4(sp) +; HYBRID-CAP-PTR-NEXT: lw a2, 0(sp) +; HYBRID-CAP-PTR-NEXT: sw a1, 4(s0) +; HYBRID-CAP-PTR-NEXT: sw a2, 0(s0) +; HYBRID-CAP-PTR-NEXT: sb a0, 8(s0) +; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_strong +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[EXP:%.*]], i64 [[NEW:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i64 [[EXP]], ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP3:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP4:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP3]], 0 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = insertvalue { i64, i1 } [[TMP4]], i1 [[TMP2]], 1 +; PURECAP-IR-NEXT: ret { i64, i1 } [[TMP5]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_strong +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[EXP:%.*]], i64 [[NEW:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8 +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i64 [[EXP]], ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_8_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i64 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP1]], align 8 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP4:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP3]], 0 +; HYBRID-IR-NEXT: 
[[TMP5:%.*]] = insertvalue { i64, i1 } [[TMP4]], i1 [[TMP2]], 1 +; HYBRID-IR-NEXT: ret { i64, i1 } [[TMP5]] +; + %1 = cmpxchg ptr addrspace(200) %ptr, i64 %exp, i64 %new seq_cst seq_cst + ret { i64, i1 } %1 +} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll new file mode 100644 index 000000000000..f1139a4b261a --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll @@ -0,0 +1,1517 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-cap-size-int.ll +;; Check that we can atomically update i128 (i64 for 32-bit systems) +;; For systems without double-width atomics (RISC-V, MIPS) we can use capability atomics +;; This is needed so we can report true for __atomic_always_lock_free(sizeof(uintptr_t), 0) +; RUN: opt -data-layout="e-m:e-pf200:128:128:128:64-p:64:64-i64:64-i128:128-n64-S128-A200-P200-G200" -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -atomic-expand -S -mattr=+a < %s | FileCheck %s --check-prefix=PURECAP-IR +; RUN: opt -data-layout="e-m:e-pf200:128:128:128:64-p:64:64-i64:64-i128:128-n64-S128" -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -atomic-expand -S -mattr=+a < %s | FileCheck %s --check-prefix=HYBRID-IR +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=+a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-ATOMICS --allow-unused-prefixes +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=-a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-LIBCALLS --allow-unused-prefixes +; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=+a | FileCheck %s --check-prefixes=HYBRID,HYBRID-ATOMICS --allow-unused-prefixes +; RUN: sed 's/addrspace(200)/addrspace(0)/g' %s | llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=-a | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS --allow-unused-prefixes +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID-CAP-PTR,HYBRID-CAP-PTR-ATOMICS --allow-unused-prefixes +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=-a < %s | FileCheck %s --check-prefixes=HYBRID-CAP-PTR,HYBRID-CAP-PTR-LIBCALLS --allow-unused-prefixes + +define i128 @store(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: store: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -48 +; PURECAP-NEXT: csc cra, 32(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs1, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: mv s0, a2 +; PURECAP-NEXT: mv s1, a1 +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_store_16 +; PURECAP-NEXT: mv a0, s1 +; PURECAP-NEXT: mv a1, s0 +; PURECAP-NEXT: clc cra, 32(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs1, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 48 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: store: +; HYBRID: # %bb.0: +; 
HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: mv s0, a2 +; HYBRID-NEXT: mv s1, a1 +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_store_16@plt +; HYBRID-NEXT: mv a0, s1 +; HYBRID-NEXT: mv a1, s0 +; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: store: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -32 +; HYBRID-CAP-PTR-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_store_16_c@plt +; HYBRID-CAP-PTR-NEXT: mv a0, s1 +; HYBRID-CAP-PTR-NEXT: mv a1, s0 +; HYBRID-CAP-PTR-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 32 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@store +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { +; PURECAP-IR-NEXT: call void @__atomic_store_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[VAL]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@store +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { +; HYBRID-IR-NEXT: call void @__atomic_store_16_c(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i128 [[VAL]] +; + store atomic i128 %val, ptr addrspace(200) %ptr seq_cst, align 16 + ret i128 %val +} + +define i128 @load(ptr addrspace(200) %ptr) nounwind { +; PURECAP-LABEL: load: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: li a1, 5 +; PURECAP-NEXT: ccall __atomic_load_16 +; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: load: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: li a1, 5 +; HYBRID-NEXT: call __atomic_load_16@plt +; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: load: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a1, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_load_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@load +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_load_16(ptr addrspace(200) [[PTR]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@load +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_load_16_c(ptr addrspace(200) [[PTR]], i32 5) +; HYBRID-IR-NEXT: ret i128 [[TMP1]] +; + %val = load 
atomic i128, ptr addrspace(200) %ptr seq_cst, align 16 + ret i128 %val +} + +define i128 @atomic_xchg(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_xchg: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_exchange_16 +; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_xchg: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_exchange_16@plt +; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_xchg: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_exchange_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xchg +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_exchange_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xchg +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_exchange_16_c(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i128 [[TMP1]] +; + %tmp = atomicrmw xchg ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_add(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_add: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_add_16 +; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_add: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_add_16@plt +; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_add: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_add_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_add +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_add_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_add +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_add_16_c(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; HYBRID-IR-NEXT: 
ret i128 [[TMP1]] +; + %tmp = atomicrmw add ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_sub(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_sub: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_sub_16 +; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_sub: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_sub_16@plt +; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_sub: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_sub_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_sub +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_sub_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_sub +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_sub_16_c(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i128 [[TMP1]] +; + %tmp = atomicrmw sub ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_and(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_and: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_and_16 +; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_and: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_and_16@plt +; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_and: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_and_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_and +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_and_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_and +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_and_16_c(ptr addrspace(200) [[PTR]], i128 
[[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i128 [[TMP1]] +; + %tmp = atomicrmw and ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_nand(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_nand: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_nand_16 +; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_nand: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_nand_16@plt +; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_nand: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_nand_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_nand +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_nand_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_nand +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_nand_16_c(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i128 [[TMP1]] +; + %tmp = atomicrmw nand ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_or(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_or: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_or_16 +; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_or: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_or_16@plt +; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_or: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_or_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_or +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_or_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_or +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 
@__atomic_fetch_or_16_c(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i128 [[TMP1]] +; + %tmp = atomicrmw or ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_xor(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_xor: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -16 +; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-NEXT: li a3, 5 +; PURECAP-NEXT: ccall __atomic_fetch_xor_16 +; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 16 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_xor: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -16 +; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-NEXT: li a3, 5 +; HYBRID-NEXT: call __atomic_fetch_xor_16@plt +; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 16 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_xor: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: li a3, 5 +; HYBRID-CAP-PTR-NEXT: call __atomic_fetch_xor_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xor +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_xor_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xor +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_xor_16_c(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; HYBRID-IR-NEXT: ret i128 [[TMP1]] +; + %tmp = atomicrmw xor ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_max(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_max: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -96 +; PURECAP-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; PURECAP-NEXT: cmove cs3, ca0 +; PURECAP-NEXT: cld a5, 8(ca0) +; PURECAP-NEXT: cld a4, 0(ca0) +; PURECAP-NEXT: mv s1, a2 +; PURECAP-NEXT: mv s2, a1 +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds cs0, ca0, 16 +; PURECAP-NEXT: j .LBB9_2 +; PURECAP-NEXT: .LBB9_1: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: csd a4, 0(csp) +; PURECAP-NEXT: csd a5, 8(csp) +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, cs3 +; PURECAP-NEXT: cmove ca1, cs0 +; PURECAP-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-NEXT: cld a5, 8(csp) +; PURECAP-NEXT: cld a4, 0(csp) +; PURECAP-NEXT: bnez a0, .LBB9_7 +; PURECAP-NEXT: .LBB9_2: # %atomicrmw.start +; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: beq a5, s1, .LBB9_4 +; PURECAP-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: slt a0, s1, a5 +; PURECAP-NEXT: j .LBB9_5 +; PURECAP-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: sltu a0, s2, a4 +; PURECAP-NEXT: .LBB9_5: # %atomicrmw.start +; 
PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: mv a2, a4 +; PURECAP-NEXT: mv a3, a5 +; PURECAP-NEXT: bnez a0, .LBB9_1 +; PURECAP-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-NEXT: mv a2, s2 +; PURECAP-NEXT: mv a3, s1 +; PURECAP-NEXT: j .LBB9_1 +; PURECAP-NEXT: .LBB9_7: # %atomicrmw.end +; PURECAP-NEXT: mv a0, a4 +; PURECAP-NEXT: mv a1, a5 +; PURECAP-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 96 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_max: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -48 +; HYBRID-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s0, 32(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s1, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s2, 16(sp) # 8-byte Folded Spill +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: ld a5, 8(a0) +; HYBRID-NEXT: ld a4, 0(a0) +; HYBRID-NEXT: mv s1, a2 +; HYBRID-NEXT: mv s2, a1 +; HYBRID-NEXT: j .LBB9_2 +; HYBRID-NEXT: .LBB9_1: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: sd a4, 0(sp) +; HYBRID-NEXT: sd a5, 8(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, s0 +; HYBRID-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-NEXT: ld a5, 8(sp) +; HYBRID-NEXT: ld a4, 0(sp) +; HYBRID-NEXT: bnez a0, .LBB9_7 +; HYBRID-NEXT: .LBB9_2: # %atomicrmw.start +; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: beq a5, s1, .LBB9_4 +; HYBRID-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: slt a0, s1, a5 +; HYBRID-NEXT: j .LBB9_5 +; HYBRID-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: sltu a0, s2, a4 +; HYBRID-NEXT: .LBB9_5: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: mv a2, a4 +; HYBRID-NEXT: mv a3, a5 +; HYBRID-NEXT: bnez a0, .LBB9_1 +; HYBRID-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-NEXT: mv a2, s2 +; HYBRID-NEXT: mv a3, s1 +; HYBRID-NEXT: j .LBB9_1 +; HYBRID-NEXT: .LBB9_7: # %atomicrmw.end +; HYBRID-NEXT: mv a0, a4 +; HYBRID-NEXT: mv a1, a5 +; HYBRID-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 48 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_max: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -64 +; HYBRID-CAP-PTR-NEXT: sd ra, 56(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s0, 48(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s1, 40(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: ld.cap a4, (ca0) +; HYBRID-CAP-PTR-NEXT: sc ca0, 0(sp) # 16-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: cincoffset ca0, ca0, 8 +; HYBRID-CAP-PTR-NEXT: ld.cap a5, (ca0) +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: j .LBB9_2 +; HYBRID-CAP-PTR-NEXT: .LBB9_1: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sd a4, 16(sp) +; HYBRID-CAP-PTR-NEXT: sd a5, 24(sp) +; HYBRID-CAP-PTR-NEXT: addi a1, sp, 16 +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 
5 +; HYBRID-CAP-PTR-NEXT: lc ca0, 0(sp) # 16-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld a5, 24(sp) +; HYBRID-CAP-PTR-NEXT: ld a4, 16(sp) +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB9_7 +; HYBRID-CAP-PTR-NEXT: .LBB9_2: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-CAP-PTR-NEXT: beq a5, s0, .LBB9_4 +; HYBRID-CAP-PTR-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: slt a0, s0, a5 +; HYBRID-CAP-PTR-NEXT: j .LBB9_5 +; HYBRID-CAP-PTR-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s1, a4 +; HYBRID-CAP-PTR-NEXT: .LBB9_5: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, a4 +; HYBRID-CAP-PTR-NEXT: mv a3, a5 +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB9_1 +; HYBRID-CAP-PTR-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, s1 +; HYBRID-CAP-PTR-NEXT: mv a3, s0 +; HYBRID-CAP-PTR-NEXT: j .LBB9_1 +; HYBRID-CAP-PTR-NEXT: .LBB9_7: # %atomicrmw.end +; HYBRID-CAP-PTR-NEXT: mv a0, a4 +; HYBRID-CAP-PTR-NEXT: mv a1, a5 +; HYBRID-CAP-PTR-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s1, 40(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 64 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_max +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) +; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp sgt i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i128 [[LOADED]], ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_max +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; HYBRID-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; HYBRID-IR: atomicrmw.start: +; HYBRID-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], 
[[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; HYBRID-IR-NEXT: [[TMP3:%.*]] = icmp sgt i128 [[LOADED]], [[VAL]] +; HYBRID-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i128 [[LOADED]], ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP5:%.*]] = load i128, ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 +; HYBRID-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 +; HYBRID-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; HYBRID-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; HYBRID-IR: atomicrmw.end: +; HYBRID-IR-NEXT: ret i128 [[NEWLOADED]] +; + %tmp = atomicrmw max ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_min(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_min: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -96 +; PURECAP-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; PURECAP-NEXT: cmove cs3, ca0 +; PURECAP-NEXT: cld a5, 8(ca0) +; PURECAP-NEXT: cld a4, 0(ca0) +; PURECAP-NEXT: mv s1, a2 +; PURECAP-NEXT: mv s2, a1 +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds cs0, ca0, 16 +; PURECAP-NEXT: j .LBB10_2 +; PURECAP-NEXT: .LBB10_1: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: csd a4, 0(csp) +; PURECAP-NEXT: csd a5, 8(csp) +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, cs3 +; PURECAP-NEXT: cmove ca1, cs0 +; PURECAP-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-NEXT: cld a5, 8(csp) +; PURECAP-NEXT: cld a4, 0(csp) +; PURECAP-NEXT: bnez a0, .LBB10_7 +; PURECAP-NEXT: .LBB10_2: # %atomicrmw.start +; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: beq a5, s1, .LBB10_4 +; PURECAP-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: slt a0, s1, a5 +; PURECAP-NEXT: j .LBB10_5 +; PURECAP-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: sltu a0, s2, a4 +; PURECAP-NEXT: .LBB10_5: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: xori a0, a0, 1 +; PURECAP-NEXT: mv a2, a4 +; PURECAP-NEXT: mv a3, a5 +; PURECAP-NEXT: bnez a0, .LBB10_1 +; PURECAP-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-NEXT: mv a2, s2 +; PURECAP-NEXT: mv a3, s1 +; PURECAP-NEXT: j .LBB10_1 +; PURECAP-NEXT: .LBB10_7: # %atomicrmw.end +; PURECAP-NEXT: mv a0, a4 +; PURECAP-NEXT: mv a1, a5 +; PURECAP-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 
96 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_min: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -48 +; HYBRID-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s0, 32(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s1, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s2, 16(sp) # 8-byte Folded Spill +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: ld a5, 8(a0) +; HYBRID-NEXT: ld a4, 0(a0) +; HYBRID-NEXT: mv s1, a2 +; HYBRID-NEXT: mv s2, a1 +; HYBRID-NEXT: j .LBB10_2 +; HYBRID-NEXT: .LBB10_1: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: sd a4, 0(sp) +; HYBRID-NEXT: sd a5, 8(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, s0 +; HYBRID-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-NEXT: ld a5, 8(sp) +; HYBRID-NEXT: ld a4, 0(sp) +; HYBRID-NEXT: bnez a0, .LBB10_7 +; HYBRID-NEXT: .LBB10_2: # %atomicrmw.start +; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: beq a5, s1, .LBB10_4 +; HYBRID-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: slt a0, s1, a5 +; HYBRID-NEXT: j .LBB10_5 +; HYBRID-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: sltu a0, s2, a4 +; HYBRID-NEXT: .LBB10_5: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: xori a0, a0, 1 +; HYBRID-NEXT: mv a2, a4 +; HYBRID-NEXT: mv a3, a5 +; HYBRID-NEXT: bnez a0, .LBB10_1 +; HYBRID-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-NEXT: mv a2, s2 +; HYBRID-NEXT: mv a3, s1 +; HYBRID-NEXT: j .LBB10_1 +; HYBRID-NEXT: .LBB10_7: # %atomicrmw.end +; HYBRID-NEXT: mv a0, a4 +; HYBRID-NEXT: mv a1, a5 +; HYBRID-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 48 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_min: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -64 +; HYBRID-CAP-PTR-NEXT: sd ra, 56(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s0, 48(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s1, 40(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: ld.cap a4, (ca0) +; HYBRID-CAP-PTR-NEXT: sc ca0, 0(sp) # 16-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: cincoffset ca0, ca0, 8 +; HYBRID-CAP-PTR-NEXT: ld.cap a5, (ca0) +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: j .LBB10_2 +; HYBRID-CAP-PTR-NEXT: .LBB10_1: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sd a4, 16(sp) +; HYBRID-CAP-PTR-NEXT: sd a5, 24(sp) +; HYBRID-CAP-PTR-NEXT: addi a1, sp, 16 +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: lc ca0, 0(sp) # 16-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld a5, 24(sp) +; HYBRID-CAP-PTR-NEXT: ld a4, 16(sp) +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB10_7 +; HYBRID-CAP-PTR-NEXT: .LBB10_2: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-CAP-PTR-NEXT: beq a5, s0, .LBB10_4 +; HYBRID-CAP-PTR-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: slt a0, s0, a5 +; HYBRID-CAP-PTR-NEXT: j .LBB10_5 +; HYBRID-CAP-PTR-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu 
a0, s1, a4 +; HYBRID-CAP-PTR-NEXT: .LBB10_5: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: xori a0, a0, 1 +; HYBRID-CAP-PTR-NEXT: mv a2, a4 +; HYBRID-CAP-PTR-NEXT: mv a3, a5 +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB10_1 +; HYBRID-CAP-PTR-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, s1 +; HYBRID-CAP-PTR-NEXT: mv a3, s0 +; HYBRID-CAP-PTR-NEXT: j .LBB10_1 +; HYBRID-CAP-PTR-NEXT: .LBB10_7: # %atomicrmw.end +; HYBRID-CAP-PTR-NEXT: mv a0, a4 +; HYBRID-CAP-PTR-NEXT: mv a1, a5 +; HYBRID-CAP-PTR-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s1, 40(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 64 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_min +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) +; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp sle i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i128 [[LOADED]], ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_min +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; HYBRID-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; HYBRID-IR: atomicrmw.start: +; HYBRID-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; HYBRID-IR-NEXT: [[TMP3:%.*]] = icmp sle i128 [[LOADED]], [[VAL]] +; HYBRID-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i128 [[LOADED]], ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP5:%.*]] = load i128, ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 16, 
ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 +; HYBRID-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 +; HYBRID-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; HYBRID-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; HYBRID-IR: atomicrmw.end: +; HYBRID-IR-NEXT: ret i128 [[NEWLOADED]] +; + %tmp = atomicrmw min ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_umax(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_umax: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -96 +; PURECAP-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; PURECAP-NEXT: cmove cs3, ca0 +; PURECAP-NEXT: cld a5, 8(ca0) +; PURECAP-NEXT: cld a4, 0(ca0) +; PURECAP-NEXT: mv s1, a2 +; PURECAP-NEXT: mv s2, a1 +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds cs0, ca0, 16 +; PURECAP-NEXT: j .LBB11_2 +; PURECAP-NEXT: .LBB11_1: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: csd a4, 0(csp) +; PURECAP-NEXT: csd a5, 8(csp) +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, cs3 +; PURECAP-NEXT: cmove ca1, cs0 +; PURECAP-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-NEXT: cld a5, 8(csp) +; PURECAP-NEXT: cld a4, 0(csp) +; PURECAP-NEXT: bnez a0, .LBB11_7 +; PURECAP-NEXT: .LBB11_2: # %atomicrmw.start +; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: beq a5, s1, .LBB11_4 +; PURECAP-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: sltu a0, s1, a5 +; PURECAP-NEXT: j .LBB11_5 +; PURECAP-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: sltu a0, s2, a4 +; PURECAP-NEXT: .LBB11_5: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: mv a2, a4 +; PURECAP-NEXT: mv a3, a5 +; PURECAP-NEXT: bnez a0, .LBB11_1 +; PURECAP-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-NEXT: mv a2, s2 +; PURECAP-NEXT: mv a3, s1 +; PURECAP-NEXT: j .LBB11_1 +; PURECAP-NEXT: .LBB11_7: # %atomicrmw.end +; PURECAP-NEXT: mv a0, a4 +; PURECAP-NEXT: mv a1, a5 +; PURECAP-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 96 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_umax: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -48 +; HYBRID-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s0, 32(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s1, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s2, 16(sp) # 8-byte Folded Spill +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: ld a5, 8(a0) +; HYBRID-NEXT: ld a4, 0(a0) +; HYBRID-NEXT: mv s1, a2 +; HYBRID-NEXT: mv s2, a1 +; HYBRID-NEXT: j .LBB11_2 +; HYBRID-NEXT: .LBB11_1: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: sd a4, 0(sp) +; HYBRID-NEXT: sd a5, 8(sp) +; HYBRID-NEXT: mv a1, sp 
+; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, s0 +; HYBRID-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-NEXT: ld a5, 8(sp) +; HYBRID-NEXT: ld a4, 0(sp) +; HYBRID-NEXT: bnez a0, .LBB11_7 +; HYBRID-NEXT: .LBB11_2: # %atomicrmw.start +; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: beq a5, s1, .LBB11_4 +; HYBRID-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: sltu a0, s1, a5 +; HYBRID-NEXT: j .LBB11_5 +; HYBRID-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: sltu a0, s2, a4 +; HYBRID-NEXT: .LBB11_5: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: mv a2, a4 +; HYBRID-NEXT: mv a3, a5 +; HYBRID-NEXT: bnez a0, .LBB11_1 +; HYBRID-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-NEXT: mv a2, s2 +; HYBRID-NEXT: mv a3, s1 +; HYBRID-NEXT: j .LBB11_1 +; HYBRID-NEXT: .LBB11_7: # %atomicrmw.end +; HYBRID-NEXT: mv a0, a4 +; HYBRID-NEXT: mv a1, a5 +; HYBRID-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 48 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_umax: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -64 +; HYBRID-CAP-PTR-NEXT: sd ra, 56(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s0, 48(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s1, 40(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: ld.cap a4, (ca0) +; HYBRID-CAP-PTR-NEXT: sc ca0, 0(sp) # 16-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: cincoffset ca0, ca0, 8 +; HYBRID-CAP-PTR-NEXT: ld.cap a5, (ca0) +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: j .LBB11_2 +; HYBRID-CAP-PTR-NEXT: .LBB11_1: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sd a4, 16(sp) +; HYBRID-CAP-PTR-NEXT: sd a5, 24(sp) +; HYBRID-CAP-PTR-NEXT: addi a1, sp, 16 +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: lc ca0, 0(sp) # 16-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld a5, 24(sp) +; HYBRID-CAP-PTR-NEXT: ld a4, 16(sp) +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB11_7 +; HYBRID-CAP-PTR-NEXT: .LBB11_2: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-CAP-PTR-NEXT: beq a5, s0, .LBB11_4 +; HYBRID-CAP-PTR-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s0, a5 +; HYBRID-CAP-PTR-NEXT: j .LBB11_5 +; HYBRID-CAP-PTR-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s1, a4 +; HYBRID-CAP-PTR-NEXT: .LBB11_5: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, a4 +; HYBRID-CAP-PTR-NEXT: mv a3, a5 +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB11_1 +; HYBRID-CAP-PTR-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, s1 +; HYBRID-CAP-PTR-NEXT: mv a3, s0 +; HYBRID-CAP-PTR-NEXT: j .LBB11_1 +; HYBRID-CAP-PTR-NEXT: .LBB11_7: # %atomicrmw.end +; HYBRID-CAP-PTR-NEXT: mv a0, a4 +; HYBRID-CAP-PTR-NEXT: mv a1, a5 +; HYBRID-CAP-PTR-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; 
HYBRID-CAP-PTR-NEXT: ld s1, 40(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 64 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umax +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) +; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp ugt i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i128 [[LOADED]], ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_umax +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; HYBRID-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; HYBRID-IR: atomicrmw.start: +; HYBRID-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; HYBRID-IR-NEXT: [[TMP3:%.*]] = icmp ugt i128 [[LOADED]], [[VAL]] +; HYBRID-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i128 [[LOADED]], ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP5:%.*]] = load i128, ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 +; HYBRID-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 +; HYBRID-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; HYBRID-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; HYBRID-IR: atomicrmw.end: +; HYBRID-IR-NEXT: ret i128 [[NEWLOADED]] +; + %tmp = atomicrmw umax ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define i128 @atomic_umin(ptr addrspace(200) %ptr, i128 %val) nounwind { +; PURECAP-LABEL: atomic_umin: +; PURECAP: # %bb.0: +; PURECAP-NEXT: 
cincoffset csp, csp, -96 +; PURECAP-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; PURECAP-NEXT: cmove cs3, ca0 +; PURECAP-NEXT: cld a5, 8(ca0) +; PURECAP-NEXT: cld a4, 0(ca0) +; PURECAP-NEXT: mv s1, a2 +; PURECAP-NEXT: mv s2, a1 +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds cs0, ca0, 16 +; PURECAP-NEXT: j .LBB12_2 +; PURECAP-NEXT: .LBB12_1: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: csd a4, 0(csp) +; PURECAP-NEXT: csd a5, 8(csp) +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, cs3 +; PURECAP-NEXT: cmove ca1, cs0 +; PURECAP-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-NEXT: cld a5, 8(csp) +; PURECAP-NEXT: cld a4, 0(csp) +; PURECAP-NEXT: bnez a0, .LBB12_7 +; PURECAP-NEXT: .LBB12_2: # %atomicrmw.start +; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-NEXT: beq a5, s1, .LBB12_4 +; PURECAP-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: sltu a0, s1, a5 +; PURECAP-NEXT: j .LBB12_5 +; PURECAP-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: sltu a0, s2, a4 +; PURECAP-NEXT: .LBB12_5: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: xori a0, a0, 1 +; PURECAP-NEXT: mv a2, a4 +; PURECAP-NEXT: mv a3, a5 +; PURECAP-NEXT: bnez a0, .LBB12_1 +; PURECAP-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-NEXT: mv a2, s2 +; PURECAP-NEXT: mv a3, s1 +; PURECAP-NEXT: j .LBB12_1 +; PURECAP-NEXT: .LBB12_7: # %atomicrmw.end +; PURECAP-NEXT: mv a0, a4 +; PURECAP-NEXT: mv a1, a5 +; PURECAP-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 96 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: atomic_umin: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -48 +; HYBRID-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s0, 32(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s1, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s2, 16(sp) # 8-byte Folded Spill +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: ld a5, 8(a0) +; HYBRID-NEXT: ld a4, 0(a0) +; HYBRID-NEXT: mv s1, a2 +; HYBRID-NEXT: mv s2, a1 +; HYBRID-NEXT: j .LBB12_2 +; HYBRID-NEXT: .LBB12_1: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: sd a4, 0(sp) +; HYBRID-NEXT: sd a5, 8(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, s0 +; HYBRID-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-NEXT: ld a5, 8(sp) +; HYBRID-NEXT: ld a4, 0(sp) +; HYBRID-NEXT: bnez a0, .LBB12_7 +; HYBRID-NEXT: .LBB12_2: # %atomicrmw.start +; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-NEXT: beq a5, s1, .LBB12_4 +; HYBRID-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: sltu a0, s1, a5 +; HYBRID-NEXT: j .LBB12_5 +; HYBRID-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: sltu a0, s2, a4 +; HYBRID-NEXT: .LBB12_5: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 
+; HYBRID-NEXT: xori a0, a0, 1 +; HYBRID-NEXT: mv a2, a4 +; HYBRID-NEXT: mv a3, a5 +; HYBRID-NEXT: bnez a0, .LBB12_1 +; HYBRID-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-NEXT: mv a2, s2 +; HYBRID-NEXT: mv a3, s1 +; HYBRID-NEXT: j .LBB12_1 +; HYBRID-NEXT: .LBB12_7: # %atomicrmw.end +; HYBRID-NEXT: mv a0, a4 +; HYBRID-NEXT: mv a1, a5 +; HYBRID-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 48 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: atomic_umin: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -64 +; HYBRID-CAP-PTR-NEXT: sd ra, 56(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s0, 48(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s1, 40(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: ld.cap a4, (ca0) +; HYBRID-CAP-PTR-NEXT: sc ca0, 0(sp) # 16-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: cincoffset ca0, ca0, 8 +; HYBRID-CAP-PTR-NEXT: ld.cap a5, (ca0) +; HYBRID-CAP-PTR-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-NEXT: j .LBB12_2 +; HYBRID-CAP-PTR-NEXT: .LBB12_1: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sd a4, 16(sp) +; HYBRID-CAP-PTR-NEXT: sd a5, 24(sp) +; HYBRID-CAP-PTR-NEXT: addi a1, sp, 16 +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: lc ca0, 0(sp) # 16-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld a5, 24(sp) +; HYBRID-CAP-PTR-NEXT: ld a4, 16(sp) +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB12_7 +; HYBRID-CAP-PTR-NEXT: .LBB12_2: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-CAP-PTR-NEXT: beq a5, s0, .LBB12_4 +; HYBRID-CAP-PTR-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s0, a5 +; HYBRID-CAP-PTR-NEXT: j .LBB12_5 +; HYBRID-CAP-PTR-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: sltu a0, s1, a4 +; HYBRID-CAP-PTR-NEXT: .LBB12_5: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: xori a0, a0, 1 +; HYBRID-CAP-PTR-NEXT: mv a2, a4 +; HYBRID-CAP-PTR-NEXT: mv a3, a5 +; HYBRID-CAP-PTR-NEXT: bnez a0, .LBB12_1 +; HYBRID-CAP-PTR-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-CAP-PTR-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-CAP-PTR-NEXT: mv a2, s1 +; HYBRID-CAP-PTR-NEXT: mv a3, s0 +; HYBRID-CAP-PTR-NEXT: j .LBB12_1 +; HYBRID-CAP-PTR-NEXT: .LBB12_7: # %atomicrmw.end +; HYBRID-CAP-PTR-NEXT: mv a0, a4 +; HYBRID-CAP-PTR-NEXT: mv a1, a5 +; HYBRID-CAP-PTR-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s1, 40(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 64 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umin +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) +; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], 
[[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp ule i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i128 [[LOADED]], ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_umin +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; HYBRID-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; HYBRID-IR: atomicrmw.start: +; HYBRID-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; HYBRID-IR-NEXT: [[TMP3:%.*]] = icmp ule i128 [[LOADED]], [[VAL]] +; HYBRID-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i128 [[LOADED]], ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP5:%.*]] = load i128, ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 +; HYBRID-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 +; HYBRID-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; HYBRID-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; HYBRID-IR: atomicrmw.end: +; HYBRID-IR-NEXT: ret i128 [[NEWLOADED]] +; + %tmp = atomicrmw umin ptr addrspace(200) %ptr, i128 %val seq_cst + ret i128 %tmp +} + +define { i128, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i128 %exp, i128 %new) nounwind { +; PURECAP-LABEL: cmpxchg_weak: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -48 +; PURECAP-NEXT: csc cra, 32(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill +; PURECAP-NEXT: mv a6, a5 +; PURECAP-NEXT: mv a7, a4 +; PURECAP-NEXT: cmove ct0, ca1 +; PURECAP-NEXT: cmove cs0, ca0 +; PURECAP-NEXT: csd a3, 8(csp) +; PURECAP-NEXT: csd a2, 0(csp) +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds ca1, ca0, 16 +; PURECAP-NEXT: li a4, 4 +; PURECAP-NEXT: li a5, 2 +; PURECAP-NEXT: cmove ca0, ct0 +; PURECAP-NEXT: mv a2, a7 +; PURECAP-NEXT: mv a3, a6 +; PURECAP-NEXT: ccall 
__atomic_compare_exchange_16 +; PURECAP-NEXT: cld a1, 8(csp) +; PURECAP-NEXT: cld a2, 0(csp) +; PURECAP-NEXT: csd a1, 8(cs0) +; PURECAP-NEXT: csd a2, 0(cs0) +; PURECAP-NEXT: csb a0, 16(cs0) +; PURECAP-NEXT: clc cra, 32(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 48 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: cmpxchg_weak: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-NEXT: mv a6, a5 +; HYBRID-NEXT: mv a7, a4 +; HYBRID-NEXT: mv t0, a1 +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: sd a3, 8(sp) +; HYBRID-NEXT: sd a2, 0(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a4, 4 +; HYBRID-NEXT: li a5, 2 +; HYBRID-NEXT: mv a0, t0 +; HYBRID-NEXT: mv a2, a7 +; HYBRID-NEXT: mv a3, a6 +; HYBRID-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-NEXT: ld a1, 8(sp) +; HYBRID-NEXT: ld a2, 0(sp) +; HYBRID-NEXT: sd a1, 8(s0) +; HYBRID-NEXT: sd a2, 0(s0) +; HYBRID-NEXT: sb a0, 16(s0) +; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: cmpxchg_weak: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -32 +; HYBRID-CAP-PTR-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: mv a6, a5 +; HYBRID-CAP-PTR-NEXT: mv a7, a4 +; HYBRID-CAP-PTR-NEXT: cmove ct0, ca1 +; HYBRID-CAP-PTR-NEXT: mv s0, a0 +; HYBRID-CAP-PTR-NEXT: sd a3, 8(sp) +; HYBRID-CAP-PTR-NEXT: sd a2, 0(sp) +; HYBRID-CAP-PTR-NEXT: mv a1, sp +; HYBRID-CAP-PTR-NEXT: li a4, 4 +; HYBRID-CAP-PTR-NEXT: li a5, 2 +; HYBRID-CAP-PTR-NEXT: cmove ca0, ct0 +; HYBRID-CAP-PTR-NEXT: mv a2, a7 +; HYBRID-CAP-PTR-NEXT: mv a3, a6 +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld a1, 8(sp) +; HYBRID-CAP-PTR-NEXT: ld a2, 0(sp) +; HYBRID-CAP-PTR-NEXT: sd a1, 8(s0) +; HYBRID-CAP-PTR-NEXT: sd a2, 0(s0) +; HYBRID-CAP-PTR-NEXT: sb a0, 16(s0) +; HYBRID-CAP-PTR-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 32 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_weak +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i128 [[EXP]], ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 4, i32 2) +; PURECAP-IR-NEXT: [[TMP3:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP3]], 0 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = insertvalue { i128, i1 } [[TMP4]], i1 [[TMP2]], 1 +; PURECAP-IR-NEXT: ret { i128, i1 } [[TMP5]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_weak +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16 +; HYBRID-IR-NEXT: call void 
@llvm.lifetime.start.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i128 [[EXP]], ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_16_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i128 [[NEW]], i32 4, i32 2) +; HYBRID-IR-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP3]], 0 +; HYBRID-IR-NEXT: [[TMP5:%.*]] = insertvalue { i128, i1 } [[TMP4]], i1 [[TMP2]], 1 +; HYBRID-IR-NEXT: ret { i128, i1 } [[TMP5]] +; + %1 = cmpxchg weak ptr addrspace(200) %ptr, i128 %exp, i128 %new acq_rel acquire + ret { i128, i1 } %1 +} + +define { i128, i1 } @cmpxchg_strong(ptr addrspace(200) %ptr, i128 %exp, i128 %new) nounwind { +; PURECAP-LABEL: cmpxchg_strong: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cincoffset csp, csp, -48 +; PURECAP-NEXT: csc cra, 32(csp) # 16-byte Folded Spill +; PURECAP-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill +; PURECAP-NEXT: mv a6, a5 +; PURECAP-NEXT: mv a7, a4 +; PURECAP-NEXT: cmove ct0, ca1 +; PURECAP-NEXT: cmove cs0, ca0 +; PURECAP-NEXT: csd a3, 8(csp) +; PURECAP-NEXT: csd a2, 0(csp) +; PURECAP-NEXT: cincoffset ca0, csp, 0 +; PURECAP-NEXT: csetbounds ca1, ca0, 16 +; PURECAP-NEXT: li a4, 5 +; PURECAP-NEXT: li a5, 5 +; PURECAP-NEXT: cmove ca0, ct0 +; PURECAP-NEXT: mv a2, a7 +; PURECAP-NEXT: mv a3, a6 +; PURECAP-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-NEXT: cld a1, 8(csp) +; PURECAP-NEXT: cld a2, 0(csp) +; PURECAP-NEXT: csd a1, 8(cs0) +; PURECAP-NEXT: csd a2, 0(cs0) +; PURECAP-NEXT: csb a0, 16(cs0) +; PURECAP-NEXT: clc cra, 32(csp) # 16-byte Folded Reload +; PURECAP-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload +; PURECAP-NEXT: cincoffset csp, csp, 48 +; PURECAP-NEXT: cret +; +; HYBRID-LABEL: cmpxchg_strong: +; HYBRID: # %bb.0: +; HYBRID-NEXT: addi sp, sp, -32 +; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-NEXT: mv a6, a5 +; HYBRID-NEXT: mv a7, a4 +; HYBRID-NEXT: mv t0, a1 +; HYBRID-NEXT: mv s0, a0 +; HYBRID-NEXT: sd a3, 8(sp) +; HYBRID-NEXT: sd a2, 0(sp) +; HYBRID-NEXT: mv a1, sp +; HYBRID-NEXT: li a4, 5 +; HYBRID-NEXT: li a5, 5 +; HYBRID-NEXT: mv a0, t0 +; HYBRID-NEXT: mv a2, a7 +; HYBRID-NEXT: mv a3, a6 +; HYBRID-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-NEXT: ld a1, 8(sp) +; HYBRID-NEXT: ld a2, 0(sp) +; HYBRID-NEXT: sd a1, 8(s0) +; HYBRID-NEXT: sd a2, 0(s0) +; HYBRID-NEXT: sb a0, 16(s0) +; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-NEXT: addi sp, sp, 32 +; HYBRID-NEXT: ret +; +; HYBRID-CAP-PTR-LABEL: cmpxchg_strong: +; HYBRID-CAP-PTR: # %bb.0: +; HYBRID-CAP-PTR-NEXT: addi sp, sp, -32 +; HYBRID-CAP-PTR-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-NEXT: mv a6, a5 +; HYBRID-CAP-PTR-NEXT: mv a7, a4 +; HYBRID-CAP-PTR-NEXT: cmove ct0, ca1 +; HYBRID-CAP-PTR-NEXT: mv s0, a0 +; HYBRID-CAP-PTR-NEXT: sd a3, 8(sp) +; HYBRID-CAP-PTR-NEXT: sd a2, 0(sp) +; HYBRID-CAP-PTR-NEXT: mv a1, sp +; HYBRID-CAP-PTR-NEXT: li a4, 5 +; HYBRID-CAP-PTR-NEXT: li a5, 5 +; HYBRID-CAP-PTR-NEXT: cmove ca0, ct0 +; HYBRID-CAP-PTR-NEXT: mv a2, a7 +; HYBRID-CAP-PTR-NEXT: mv a3, a6 +; HYBRID-CAP-PTR-NEXT: call __atomic_compare_exchange_16_c@plt +; HYBRID-CAP-PTR-NEXT: ld a1, 8(sp) +; HYBRID-CAP-PTR-NEXT: ld a2, 0(sp) +; HYBRID-CAP-PTR-NEXT: sd a1, 8(s0) +; HYBRID-CAP-PTR-NEXT: sd a2, 
0(s0) +; HYBRID-CAP-PTR-NEXT: sb a0, 16(s0) +; HYBRID-CAP-PTR-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-NEXT: addi sp, sp, 32 +; HYBRID-CAP-PTR-NEXT: ret +; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_strong +; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) addrspace(200) #[[ATTR0]] { +; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) +; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: store i128 [[EXP]], ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; PURECAP-IR-NEXT: [[TMP3:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 +; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP3]], 0 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = insertvalue { i128, i1 } [[TMP4]], i1 [[TMP2]], 1 +; PURECAP-IR-NEXT: ret { i128, i1 } [[TMP5]] +; +; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_strong +; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) #[[ATTR0]] { +; HYBRID-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16 +; HYBRID-IR-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: store i128 [[EXP]], ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_16_c(ptr addrspace(200) [[PTR]], ptr [[TMP1]], i128 [[NEW]], i32 5, i32 5) +; HYBRID-IR-NEXT: [[TMP3:%.*]] = load i128, ptr [[TMP1]], align 16 +; HYBRID-IR-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP3]], 0 +; HYBRID-IR-NEXT: [[TMP5:%.*]] = insertvalue { i128, i1 } [[TMP4]], i1 [[TMP2]], 1 +; HYBRID-IR-NEXT: ret { i128, i1 } [[TMP5]] +; + %1 = cmpxchg ptr addrspace(200) %ptr, i128 %exp, i128 %new seq_cst seq_cst + ret { i128, i1 } %1 +} From 3af2a62170c746785f44fb274c9cae68733b2022 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Wed, 20 Sep 2023 07:58:35 -0700 Subject: [PATCH 07/18] [CHERI-RISC-V] Support inline atomic loads for 2*XLen integers We can perform an atomic capability load and extract the raw bits. As can be seen from the test case, the IR-level conversion to i128 using shift+or is converted to a direct read into the register holding the high part of an i128. --- llvm/include/llvm/CodeGen/TargetLowering.h | 12 +++- llvm/lib/CodeGen/AtomicExpandPass.cpp | 47 +++++++++++++ llvm/lib/Target/Mips/MipsISelLowering.h | 1 + llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 18 +++++ llvm/lib/Target/RISCV/RISCVISelLowering.h | 3 + .../RISCV32/atomic-cap-size-int.ll | 66 +++++++++++++------ .../RISCV64/atomic-cap-size-int.ll | 66 +++++++++++++------ .../CodeGen/RISCV/cheri/atomic-load-store.ll | 39 +++++------ .../RISCV/cheri/atomic-load-store64.ll | 39 +++++------ 9 files changed, 200 insertions(+), 91 deletions(-) diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index efaaf45dc447..d107eda95b84 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -253,9 +253,11 @@ class TargetLoweringBase { /// support for these atomic instructions, and also have different options /// w.r.t. 
what they should expand to.
   enum class AtomicExpansionKind {
-    None,          // Don't expand the instruction.
-    CastToInteger, // Cast the atomic instruction to another type, e.g. from
-                   // floating-point to integer type.
+    None,            // Don't expand the instruction.
+    CastToInteger,   // Cast the atomic instruction to another type, e.g. from
+                     // floating-point to integer type.
+    CheriCapability, // Perform the operation using a CHERI capability. This
+                     // is only supported for capability-size integers.
     LLSC,      // Expand the instruction into loadlinked/storeconditional; used
                // by ARM/AArch64.
     LLOnly,    // Expand the (load) instruction into just a load-linked, which has
@@ -3124,6 +3126,10 @@ class TargetLoweringBase {
     return SDValue();
   }
   virtual bool hasCapabilitySetAddress() const { return false; }
+  virtual unsigned cheriCapabilityAddressSpace() const {
+    llvm_unreachable("Not implemented for this target");
+    return ~0u;
+  }
   MVT cheriCapabilityType() const { return CapType; }
   bool cheriCapabilityTypeHasPreciseBounds() const {
     return CapTypeHasPreciseBounds;
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index ae1ddc6e3b2f..e5c3ec2f04ab 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -74,6 +74,7 @@ class AtomicExpand : public FunctionPass {
   bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
   IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
   LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
+  LoadInst *convertAtomicLoadToCapabilityType(LoadInst *LI);
   bool tryExpandAtomicLoad(LoadInst *LI);
   bool expandAtomicLoadToLL(LoadInst *LI);
   bool expandAtomicLoadToCmpXchg(LoadInst *LI);
@@ -421,6 +422,9 @@ bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
   case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
     LI->setAtomic(AtomicOrdering::NotAtomic);
     return true;
+  case TargetLoweringBase::AtomicExpansionKind::CheriCapability:
+    convertAtomicLoadToCapabilityType(LI);
+    return true;
   default:
     llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
   }
@@ -506,6 +510,49 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
   return NewSI;
 }
 
+/// Convert a capability to an integer with the same bitwidth.
+/// For a 129-bit capability, return an i128 value with the raw bits.
+static Value *integerFromSameSizeCapability(Value *Cap, IRBuilderBase &Builder,
+                                            const DataLayout &DL) {
+  auto *IntTy = Builder.getIntNTy(DL.getTypeSizeInBits(Cap->getType()));
+  auto *CapRangeTy = DL.getIndexType(Cap->getType());
+  auto *LowBits = Builder.CreateIntrinsic(Intrinsic::cheri_cap_address_get,
+                                          {CapRangeTy}, {Cap});
+  auto *HighBits = Builder.CreateIntrinsic(Intrinsic::cheri_cap_high_get,
+                                           {CapRangeTy}, {Cap});
+  unsigned CapRange =
+      DL.getIndexSizeInBits(Cap->getType()->getPointerAddressSpace());
+  return Builder.CreateOr(
+      Builder.CreateZExt(LowBits, IntTy),
+      Builder.CreateShl(Builder.CreateZExt(HighBits, IntTy), CapRange));
+}
+
+LoadInst *AtomicExpand::convertAtomicLoadToCapabilityType(LoadInst *LI) {
+  IRBuilder<> Builder(LI);
+  unsigned CapAS = TLI->cheriCapabilityAddressSpace();
+  Value *Addr = LI->getPointerOperand();
+  auto CapTy = Addr->getType()->isOpaquePointerTy()
+                   ? Builder.getPtrTy(CapAS)
+                   : Builder.getInt8PtrTy(CapAS);
+  Value *NewAddr =
+      Addr->getType()->isOpaquePointerTy()
+          ? 
Addr
+          : Builder.CreateBitCast(
+                Addr, PointerType::get(
+                          CapTy, Addr->getType()->getPointerAddressSpace()));
+
+  auto *NewLI = Builder.CreateLoad(CapTy, NewAddr, LI->isVolatile());
+  NewLI->setAlignment(LI->getAlign());
+  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
+  Value *NewVal = integerFromSameSizeCapability(
+      NewLI, Builder, LI->getModule()->getDataLayout());
+  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n"
+                    << *NewVal);
+  LI->replaceAllUsesWith(NewVal);
+  LI->eraseFromParent();
+  return NewLI;
+}
+
 void AtomicExpand::expandAtomicStore(StoreInst *SI) {
   // This function is only called on atomic stores that are too large to be
   // atomic if implemented as a native store. So we replace them by an
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index dab8c6795813..b6bd4c9e4f61 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -407,6 +407,7 @@ extern bool LargeCapTable;
   SDValue getCapabilityEqualExact(const SDLoc &DL, SDValue LHS, SDValue RHS,
                                   SelectionDAG &DAG) const override;
   bool hasCapabilitySetAddress() const override { return true; }
+  unsigned cheriCapabilityAddressSpace() const override { return 200; }
   TailPaddingAmount
   getTailPaddingForPreciseBounds(uint64_t Size) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e0238f8a48d5..87d0609cfa41 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12729,6 +12729,16 @@ EVT RISCVTargetLowering::getOptimalMemOpType(
   return TargetLowering::getOptimalMemOpType(Op, FuncAttributes);
 }
 
+TargetLowering::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicLoadInIR(llvm::LoadInst *LI) const {
+  if (Subtarget.hasCheri() &&
+      LI->getType()->isIntegerTy(
+          Subtarget.typeForCapabilities().getSizeInBits())) {
+    return AtomicExpansionKind::CheriCapability;
+  }
+  return AtomicExpansionKind::None;
+}
+
 TargetLowering::AtomicExpansionKind
 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
@@ -12884,6 +12894,14 @@ bool RISCVTargetLowering::supportsAtomicOperation(const DataLayout &DL,
       !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()) &&
       (isa<AtomicCmpXchgInst>(AI) || isa<AtomicRMWInst>(AI)))
     return false;
+  // For CHERI, i128/i64 loads can be expanded with capability operations.
+  // Using capability pointers in hybrid mode is not yet supported for this
+  // as we are missing some required patterns.
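+  // That is, expand only when the pointer representation matches the ABI:
+  // a capability pointer under purecap, an integer pointer under hybrid.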
+  if (Subtarget.hasStdExtA() && Subtarget.hasCheri() && isa<LoadInst>(AI) &&
+      ValueTy->isIntegerTy(Subtarget.typeForCapabilities().getSizeInBits()) &&
+      DL.isFatPointer(PointerTy) ==
+          RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
+    return true;
   return TargetLowering::supportsAtomicOperation(DL, AI, ValueTy, PointerTy,
                                                  Alignment);
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 240c50faee3f..3e0eaa9e302a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -568,6 +568,8 @@ class RISCVTargetLowering : public TargetLowering {
 
   TargetLowering::AtomicExpansionKind
   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+  AtomicExpansionKind
+  shouldExpandAtomicLoadInIR(llvm::LoadInst *LI) const override;
   Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                       Value *AlignedAddr, Value *Incr,
                                       Value *Mask, Value *ShiftAmt,
@@ -733,6 +735,7 @@ class RISCVTargetLowering : public TargetLowering {
   SDValue getCapabilityEqualExact(const SDLoc &DL, SDValue LHS, SDValue RHS,
                                   SelectionDAG &DAG) const override;
   bool hasCapabilitySetAddress() const override { return true; }
+  unsigned cheriCapabilityAddressSpace() const override { return 200; }
   TailPaddingAmount
   getTailPaddingForPreciseBounds(uint64_t Size) const override;
diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
index f80c30bef993..559af1609aa5 100644
--- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
+++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
@@ -81,25 +81,43 @@ define i64 @store(ptr addrspace(200) %ptr, i64 %val) nounwind {
 }
 
 define i64 @load(ptr addrspace(200) %ptr) nounwind {
-; PURECAP-LABEL: load:
-; PURECAP:       # %bb.0:
-; PURECAP-NEXT:    cincoffset csp, csp, -16
-; PURECAP-NEXT:    csc cra, 8(csp) # 8-byte Folded Spill
-; PURECAP-NEXT:    li a1, 5
-; PURECAP-NEXT:    ccall __atomic_load_8
-; PURECAP-NEXT:    clc cra, 8(csp) # 8-byte Folded Reload
-; PURECAP-NEXT:    cincoffset csp, csp, 16
-; PURECAP-NEXT:    cret
+; PURECAP-ATOMICS-LABEL: load:
+; PURECAP-ATOMICS:       # %bb.0:
+; PURECAP-ATOMICS-NEXT:    fence rw, rw
+; PURECAP-ATOMICS-NEXT:    clc ca1, 0(ca0)
+; PURECAP-ATOMICS-NEXT:    mv a0, a1
+; PURECAP-ATOMICS-NEXT:    cgethigh a1, ca1
+; PURECAP-ATOMICS-NEXT:    fence r, rw
+; PURECAP-ATOMICS-NEXT:    cret
 ;
-; HYBRID-LABEL: load:
-; HYBRID:       # %bb.0:
-; HYBRID-NEXT:    addi sp, sp, -16
-; HYBRID-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; HYBRID-NEXT:    li a1, 5
-; HYBRID-NEXT:    call __atomic_load_8@plt
-; HYBRID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; HYBRID-NEXT:    addi sp, sp, 16
-; HYBRID-NEXT:    ret
+; PURECAP-LIBCALLS-LABEL: load:
+; PURECAP-LIBCALLS:       # %bb.0:
+; PURECAP-LIBCALLS-NEXT:    cincoffset csp, csp, -16
+; PURECAP-LIBCALLS-NEXT:    csc cra, 8(csp) # 8-byte Folded Spill
+; PURECAP-LIBCALLS-NEXT:    li a1, 5
+; PURECAP-LIBCALLS-NEXT:    ccall __atomic_load_8
+; PURECAP-LIBCALLS-NEXT:    clc cra, 8(csp) # 8-byte Folded Reload
+; PURECAP-LIBCALLS-NEXT:    cincoffset csp, csp, 16
+; PURECAP-LIBCALLS-NEXT:    cret
+;
+; HYBRID-ATOMICS-LABEL: load:
+; HYBRID-ATOMICS:       # %bb.0:
+; HYBRID-ATOMICS-NEXT:    fence rw, rw
+; HYBRID-ATOMICS-NEXT:    lc ca1, 0(a0)
+; HYBRID-ATOMICS-NEXT:    mv a0, a1
+; HYBRID-ATOMICS-NEXT:    cgethigh a1, ca1
+; HYBRID-ATOMICS-NEXT:    fence r, rw
+; HYBRID-ATOMICS-NEXT:    ret
+;
+; HYBRID-LIBCALLS-LABEL: load:
+; HYBRID-LIBCALLS:       # %bb.0:
+; HYBRID-LIBCALLS-NEXT:    addi sp, sp, -16
+; HYBRID-LIBCALLS-NEXT:    sw ra, 
12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_8@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: load: ; HYBRID-CAP-PTR: # %bb.0: @@ -112,8 +130,16 @@ define i64 @load(ptr addrspace(200) %ptr) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@load ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_load_8(ptr addrspace(200) [[PTR]], i32 5) -; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[PTR]] monotonic, align 8 +; PURECAP-IR-NEXT: [[TMP2:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP3:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP4:%.*]] = zext i32 [[TMP2]] to i64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = zext i32 [[TMP3]] to i64 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = or i64 [[TMP4]], [[TMP6]] +; PURECAP-IR-NEXT: fence acquire +; PURECAP-IR-NEXT: ret i64 [[TMP7]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@load ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll index f1139a4b261a..11b0cd4dddff 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll @@ -81,25 +81,43 @@ define i128 @store(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @load(ptr addrspace(200) %ptr) nounwind { -; PURECAP-LABEL: load: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: li a1, 5 -; PURECAP-NEXT: ccall __atomic_load_16 -; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret +; PURECAP-ATOMICS-LABEL: load: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clc ca1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cgethigh a1, ca1 +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret ; -; HYBRID-LABEL: load: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: li a1, 5 -; HYBRID-NEXT: call __atomic_load_16@plt -; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-LIBCALLS-LABEL: load: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_16 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lc ca1, 0(a0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: cgethigh a1, ca1 +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load: +; HYBRID-LIBCALLS: # %bb.0: 
+; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_16@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: load: ; HYBRID-CAP-PTR: # %bb.0: @@ -112,8 +130,16 @@ define i128 @load(ptr addrspace(200) %ptr) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@load ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_load_16(ptr addrspace(200) [[PTR]], i32 5) -; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; PURECAP-IR-NEXT: fence seq_cst +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[PTR]] monotonic, align 16 +; PURECAP-IR-NEXT: [[TMP2:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP1]]) +; PURECAP-IR-NEXT: [[TMP4:%.*]] = zext i64 [[TMP2]] to i128 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = zext i64 [[TMP3]] to i128 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = shl i128 [[TMP5]], 64 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = or i128 [[TMP4]], [[TMP6]] +; PURECAP-IR-NEXT: fence acquire +; PURECAP-IR-NEXT: ret i128 [[TMP7]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@load ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/RISCV/cheri/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/cheri/atomic-load-store.ll index c82b3e85f641..c0269a6a35e6 100644 --- a/llvm/test/CodeGen/RISCV/cheri/atomic-load-store.ll +++ b/llvm/test/CodeGen/RISCV/cheri/atomic-load-store.ll @@ -447,12 +447,9 @@ define i64 @atomic_load_i64_unordered(i64 addrspace(200)* %a) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_load_i64_unordered: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a1, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_load_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clc ca1, 0(ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_load_i64_unordered: @@ -486,12 +483,9 @@ define i64 @atomic_load_i64_monotonic(i64 addrspace(200)* %a) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_load_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a1, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_load_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clc ca1, 0(ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_load_i64_monotonic: @@ -525,12 +519,10 @@ define i64 @atomic_load_i64_acquire(i64 addrspace(200)* %a) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_load_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a1, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_load_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clc 
ca1, 0(ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 +; RV32IAXCHERI-NEXT: fence r, rw ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_load_i64_acquire: @@ -565,12 +557,11 @@ define i64 @atomic_load_i64_seq_cst(i64 addrspace(200)* %a) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_load_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a1, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_load_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: fence rw, rw +; RV32IAXCHERI-NEXT: clc ca1, 0(ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 +; RV32IAXCHERI-NEXT: fence r, rw ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_load_i64_seq_cst: diff --git a/llvm/test/CodeGen/RISCV/cheri/atomic-load-store64.ll b/llvm/test/CodeGen/RISCV/cheri/atomic-load-store64.ll index c82b3e85f641..c0269a6a35e6 100644 --- a/llvm/test/CodeGen/RISCV/cheri/atomic-load-store64.ll +++ b/llvm/test/CodeGen/RISCV/cheri/atomic-load-store64.ll @@ -447,12 +447,9 @@ define i64 @atomic_load_i64_unordered(i64 addrspace(200)* %a) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_load_i64_unordered: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a1, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_load_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clc ca1, 0(ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_load_i64_unordered: @@ -486,12 +483,9 @@ define i64 @atomic_load_i64_monotonic(i64 addrspace(200)* %a) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_load_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a1, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_load_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clc ca1, 0(ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_load_i64_monotonic: @@ -525,12 +519,10 @@ define i64 @atomic_load_i64_acquire(i64 addrspace(200)* %a) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_load_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a1, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_load_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clc ca1, 0(ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 +; RV32IAXCHERI-NEXT: fence r, rw ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_load_i64_acquire: @@ -565,12 +557,11 @@ define i64 @atomic_load_i64_seq_cst(i64 addrspace(200)* %a) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_load_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a1, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_load_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; 
RV32IAXCHERI-NEXT:    cincoffset csp, csp, 16
+; RV32IAXCHERI-NEXT:    fence rw, rw
+; RV32IAXCHERI-NEXT:    clc ca1, 0(ca0)
+; RV32IAXCHERI-NEXT:    mv a0, a1
+; RV32IAXCHERI-NEXT:    cgethigh a1, ca1
+; RV32IAXCHERI-NEXT:    fence r, rw
 ; RV32IAXCHERI-NEXT:    cret
 ;
 ; RV64IXCHERI-LABEL: atomic_load_i64_seq_cst:

From 75ce1734a51b1d8c616c83073a36d9bb8074f1c7 Mon Sep 17 00:00:00 2001
From: Alex Richardson
Date: Wed, 20 Sep 2023 08:05:50 -0700
Subject: [PATCH 08/18] [CHERI-RISC-V] Support inline atomic stores for 2*XLen
 integers

We can perform an atomic capability store built from the raw integer
bits. As can be seen from the test case, the IR-level i128 shift is
elided and directly replaced with a csethigh.
---
 llvm/lib/CodeGen/AtomicExpandPass.cpp         | 66 ++++++++++---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 22 +++--
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |  2 +
 .../RISCV32/atomic-cap-size-int.ll            | 95 ++++++++++++-------
 .../RISCV64/atomic-cap-size-int.ll            | 95 ++++++++++++-------
 .../CodeGen/RISCV/cheri/atomic-load-store.ll  | 38 +++-----
 .../RISCV/cheri/atomic-load-store64.ll        | 38 +++-----
 7 files changed, 220 insertions(+), 136 deletions(-)

diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index e5c3ec2f04ab..d514d797736e 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -79,6 +79,7 @@ class AtomicExpand : public FunctionPass {
   bool expandAtomicLoadToLL(LoadInst *LI);
   bool expandAtomicLoadToCmpXchg(LoadInst *LI);
   StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
+  StoreInst *convertAtomicStoreToCapabilityType(StoreInst *SI);
   bool tryExpandAtomicStore(StoreInst *SI);
   void expandAtomicStore(StoreInst *SI);
   bool tryExpandAtomicRMW(AtomicRMWInst *AI);
@@ -440,6 +441,9 @@ bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
   case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
     SI->setAtomic(AtomicOrdering::NotAtomic);
     return true;
+  case TargetLoweringBase::AtomicExpansionKind::CheriCapability:
+    convertAtomicStoreToCapabilityType(SI);
+    return true;
   default:
     llvm_unreachable("Unhandled case in tryExpandAtomicStore");
   }
@@ -510,6 +514,22 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
   return NewSI;
 }

+/// Convert an integer to a capability with the same bitwidth.
+/// For a 128-bit integer return a capability holding the raw bits.
+static Value *integerToSameSizeCapability(Value *Int, IRBuilderBase &Builder,
+                                          Type *CapTy, const DataLayout &DL) {
+  unsigned CapRange = DL.getIndexSizeInBits(CapTy->getPointerAddressSpace());
+  auto *CapRangeTy = DL.getIndexType(CapTy);
+  assert(Int->getType()->isIntegerTy(DL.getTypeSizeInBits(CapTy)));
+  auto CapWithLowBits = Builder.CreateGEP(
+      Builder.getInt8Ty(), ConstantPointerNull::get(cast<PointerType>(CapTy)),
+      {Int});
+  auto HighBits = Builder.CreateLShr(Int, CapRange);
+  return Builder.CreateIntrinsic(
+      Intrinsic::cheri_cap_high_set, {CapRangeTy},
+      {CapWithLowBits, Builder.CreateTrunc(HighBits, CapRangeTy)}, nullptr);
+}
+
 /// Convert a capability to an integer with the same bitwidth.
 /// For a 129-bit capability return an i128 value with the raw bits.
static Value *integerFromSameSizeCapability(Value *Cap, IRBuilderBase &Builder,
@@ -527,20 +547,26 @@ static Value *integerFromSameSizeCapability(Value *Cap, IRBuilderBase &Builder,
       Builder.CreateShl(Builder.CreateZExt(HighBits, IntTy), CapRange));
 }

-LoadInst *AtomicExpand::convertAtomicLoadToCapabilityType(LoadInst *LI) {
-  IRBuilder<> Builder(LI);
+static Value *getCapAddr(Value *Addr, Type **CapTy, IRBuilderBase &Builder,
+                         const TargetLowering *TLI) {
+  // If the old address was an opaque pointer, we can reuse that as the pointer
+  // value and return an opaque pointer in the capability AS.
   unsigned CapAS = TLI->cheriCapabilityAddressSpace();
-  Value *Addr = LI->getPointerOperand();
-  auto CapTy = Addr->getType()->isOpaquePointerTy()
-                   ? Builder.getPtrTy(CapAS)
-                   : Builder.getInt8PtrTy(CapAS);
-  Value *NewAddr =
-      Addr->getType()->isOpaquePointerTy()
-          ? Addr
-          : Builder.CreateBitCast(
-                Addr, PointerType::get(
-                          CapTy, Addr->getType()->getPointerAddressSpace()));
+  if (Addr->getType()->isOpaquePointerTy()) {
+    *CapTy = Builder.getPtrTy(CapAS);
+    return Addr;
+  } else {
+    *CapTy = Builder.getInt8PtrTy(CapAS);
+    return Builder.CreateBitCast(
+        Addr,
+        PointerType::get(*CapTy, Addr->getType()->getPointerAddressSpace()));
+  }
+}

+LoadInst *AtomicExpand::convertAtomicLoadToCapabilityType(LoadInst *LI) {
+  IRBuilder<> Builder(LI);
+  Type *CapTy;
+  Value *NewAddr = getCapAddr(LI->getPointerOperand(), &CapTy, Builder, TLI);
   auto *NewLI = Builder.CreateLoad(CapTy, NewAddr, LI->isVolatile());
   NewLI->setAlignment(LI->getAlign());
   NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
@@ -553,6 +579,22 @@ LoadInst *AtomicExpand::convertAtomicLoadToCapabilityType(LoadInst *LI) {
   return NewLI;
 }

+StoreInst *
+AtomicExpand::convertAtomicStoreToCapabilityType(llvm::StoreInst *SI) {
+  IRBuilder<> Builder(SI);
+  Type *CapTy;
+  Value *NewAddr = getCapAddr(SI->getPointerOperand(), &CapTy, Builder, TLI);
+  Value *NewVal = integerToSameSizeCapability(
+      SI->getValueOperand(), Builder, CapTy, SI->getModule()->getDataLayout());
+  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr, SI->isVolatile());
+  NewSI->setAlignment(SI->getAlign());
+  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
+  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n"
+                    << *NewVal << "\n");
+  SI->eraseFromParent();
+  return NewSI;
+}
+
 void AtomicExpand::expandAtomicStore(StoreInst *SI) {
   // This function is only called on atomic stores that are too large to be
   // atomic if implemented as a native store.
So we replace them by an
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 87d0609cfa41..bb7fe5a54bc0 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12739,6 +12739,16 @@ RISCVTargetLowering::shouldExpandAtomicLoadInIR(llvm::LoadInst *LI) const {
   return AtomicExpansionKind::None;
 }

+TargetLowering::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicStoreInIR(llvm::StoreInst *SI) const {
+  if (Subtarget.hasCheri() &&
+      SI->getValueOperand()->getType()->isIntegerTy(
+          Subtarget.typeForCapabilities().getSizeInBits())) {
+    return AtomicExpansionKind::CheriCapability;
+  }
+  return AtomicExpansionKind::None;
+}
+
 TargetLowering::AtomicExpansionKind
 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
@@ -12890,17 +12900,17 @@ bool RISCVTargetLowering::supportsAtomicOperation(const DataLayout &DL,
   // FIXME: we currently have to expand CMPXCHG/RMW to libcalls since we are
   // missing the SelectionDAG nodes+expansions to use the explicit addressing
   // mode instructions.
-  if (DL.isFatPointer(PointerTy) &&
-      !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()) &&
+  bool IsPureCapABI = RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI());
+  if (DL.isFatPointer(PointerTy) && !IsPureCapABI &&
       (isa<AtomicCmpXchgInst>(AI) || isa<AtomicRMWInst>(AI)))
     return false;
-  // For CHERI i128/i64 loads can be expanded with capability operations.
+  // For CHERI, i128/i64 loads/stores can be expanded with capability operations.
   // Using capability pointers in hybrid mode is not yet supported for this
   // as we are missing some required patterns.
-  if (Subtarget.hasStdExtA() && Subtarget.hasCheri() && isa<LoadInst>(AI) &&
+  if (Subtarget.hasStdExtA() && Subtarget.hasCheri() &&
+      (isa<LoadInst>(AI) || isa<StoreInst>(AI)) &&
       ValueTy->isIntegerTy(Subtarget.typeForCapabilities().getSizeInBits()) &&
-      DL.isFatPointer(PointerTy) ==
-          RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
+      DL.isFatPointer(PointerTy) == IsPureCapABI)
     return true;
   return TargetLowering::supportsAtomicOperation(DL, AI, ValueTy, PointerTy,
                                                  Alignment);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 3e0eaa9e302a..2f9abf83b1a9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -570,6 +570,8 @@ class RISCVTargetLowering : public TargetLowering {
   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
   AtomicExpansionKind
   shouldExpandAtomicLoadInIR(llvm::LoadInst *LI) const override;
+  AtomicExpansionKind
+  shouldExpandAtomicStoreInIR(llvm::StoreInst *SI) const override;
   Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                       Value *AlignedAddr, Value *Incr,
                                       Value *Mask, Value *ShiftAmt,
diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
index 559af1609aa5..ba3df0157d67 100644
--- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
+++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
@@ -13,41 +13,61 @@
 ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=-a < %s | FileCheck %s --check-prefixes=HYBRID-CAP-PTR,HYBRID-CAP-PTR-LIBCALLS --allow-unused-prefixes

 define i64 @store(ptr addrspace(200) %ptr, i64 %val) nounwind {
-; PURECAP-LABEL: store:
-; PURECAP:       # %bb.0:
-; PURECAP-NEXT:    cincoffset 
csp, csp, -32 -; PURECAP-NEXT: csc cra, 24(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs1, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: mv s0, a2 -; PURECAP-NEXT: mv s1, a1 -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_store_8 -; PURECAP-NEXT: mv a0, s1 -; PURECAP-NEXT: mv a1, s0 -; PURECAP-NEXT: clc cra, 24(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs1, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 32 -; PURECAP-NEXT: cret +; PURECAP-ATOMICS-LABEL: store: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: cincoffset ca3, cnull, a1 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca3, a2 +; PURECAP-ATOMICS-NEXT: csc ca3, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: mv a1, a2 +; PURECAP-ATOMICS-NEXT: cret ; -; HYBRID-LABEL: store: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; HYBRID-NEXT: mv s0, a2 -; HYBRID-NEXT: mv s1, a1 -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_store_8@plt -; HYBRID-NEXT: mv a0, s1 -; HYBRID-NEXT: mv a1, s0 -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s1, 4(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-LIBCALLS-LABEL: store: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 24(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a2 +; PURECAP-LIBCALLS-NEXT: mv s1, a1 +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_8 +; PURECAP-LIBCALLS-NEXT: mv a0, s1 +; PURECAP-LIBCALLS-NEXT: mv a1, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: cincoffset ca3, cnull, a1 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca3, a2 +; HYBRID-ATOMICS-NEXT: sc ca3, 0(a0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: mv a1, a2 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a2 +; HYBRID-LIBCALLS-NEXT: mv s1, a1 +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_8@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s1 +; HYBRID-LIBCALLS-NEXT: mv a1, s0 +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: store: ; HYBRID-CAP-PTR: # %bb.0: @@ -68,7 +88,12 @@ define i64 @store(ptr 
addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@store ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { -; PURECAP-IR-NEXT: call void @__atomic_store_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; PURECAP-IR-NEXT: fence release +; PURECAP-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = lshr i64 [[VAL]], 32 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP1]], i32 [[TMP3]]) +; PURECAP-IR-NEXT: store atomic ptr addrspace(200) [[TMP4]], ptr addrspace(200) [[PTR]] monotonic, align 8 ; PURECAP-IR-NEXT: ret i64 [[VAL]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@store diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll index 11b0cd4dddff..503aa3bb4a8d 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll @@ -13,41 +13,61 @@ ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=-a < %s | FileCheck %s --check-prefixes=HYBRID-CAP-PTR,HYBRID-CAP-PTR-LIBCALLS --allow-unused-prefixes define i128 @store(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: store: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -48 -; PURECAP-NEXT: csc cra, 32(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs1, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: mv s0, a2 -; PURECAP-NEXT: mv s1, a1 -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_store_16 -; PURECAP-NEXT: mv a0, s1 -; PURECAP-NEXT: mv a1, s0 -; PURECAP-NEXT: clc cra, 32(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs1, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 48 -; PURECAP-NEXT: cret +; PURECAP-ATOMICS-LABEL: store: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: cincoffset ca3, cnull, a1 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca3, a2 +; PURECAP-ATOMICS-NEXT: csc ca3, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: mv a1, a2 +; PURECAP-ATOMICS-NEXT: cret ; -; HYBRID-LABEL: store: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: mv s0, a2 -; HYBRID-NEXT: mv s1, a1 -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_store_16@plt -; HYBRID-NEXT: mv a0, s1 -; HYBRID-NEXT: mv a1, s0 -; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s1, 8(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret +; PURECAP-LIBCALLS-LABEL: store: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -48 +; PURECAP-LIBCALLS-NEXT: csc cra, 32(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a2 +; PURECAP-LIBCALLS-NEXT: mv s1, a1 +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall 
__atomic_store_16 +; PURECAP-LIBCALLS-NEXT: mv a0, s1 +; PURECAP-LIBCALLS-NEXT: mv a1, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 32(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 48 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: cincoffset ca3, cnull, a1 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca3, a2 +; HYBRID-ATOMICS-NEXT: sc ca3, 0(a0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: mv a1, a2 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-LIBCALLS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a2 +; HYBRID-LIBCALLS-NEXT: mv s1, a1 +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_16@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s1 +; HYBRID-LIBCALLS-NEXT: mv a1, s0 +; HYBRID-LIBCALLS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: store: ; HYBRID-CAP-PTR: # %bb.0: @@ -68,7 +88,12 @@ define i128 @store(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@store ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { -; PURECAP-IR-NEXT: call void @__atomic_store_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; PURECAP-IR-NEXT: fence release +; PURECAP-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = lshr i128 [[VAL]], 64 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP1]], i64 [[TMP3]]) +; PURECAP-IR-NEXT: store atomic ptr addrspace(200) [[TMP4]], ptr addrspace(200) [[PTR]] monotonic, align 16 ; PURECAP-IR-NEXT: ret i128 [[VAL]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@store diff --git a/llvm/test/CodeGen/RISCV/cheri/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/cheri/atomic-load-store.ll index c0269a6a35e6..1b171480ab3b 100644 --- a/llvm/test/CodeGen/RISCV/cheri/atomic-load-store.ll +++ b/llvm/test/CodeGen/RISCV/cheri/atomic-load-store.ll @@ -1017,12 +1017,9 @@ define void @atomic_store_i64_unordered(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomic_store_i64_unordered: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_store_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: csc ca1, 0(ca0) ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_store_i64_unordered: @@ -1056,12 +1053,9 @@ define void @atomic_store_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: 
atomic_store_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_store_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: csc ca1, 0(ca0) ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_store_i64_monotonic: @@ -1095,12 +1089,10 @@ define void @atomic_store_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_store_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 3 -; RV32IAXCHERI-NEXT: ccall __atomic_store_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: fence rw, w +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: csc ca1, 0(ca0) ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_store_i64_release: @@ -1135,12 +1127,10 @@ define void @atomic_store_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomic_store_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_store_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: fence rw, w +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: csc ca1, 0(ca0) ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_store_i64_seq_cst: diff --git a/llvm/test/CodeGen/RISCV/cheri/atomic-load-store64.ll b/llvm/test/CodeGen/RISCV/cheri/atomic-load-store64.ll index c0269a6a35e6..1b171480ab3b 100644 --- a/llvm/test/CodeGen/RISCV/cheri/atomic-load-store64.ll +++ b/llvm/test/CodeGen/RISCV/cheri/atomic-load-store64.ll @@ -1017,12 +1017,9 @@ define void @atomic_store_i64_unordered(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomic_store_i64_unordered: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_store_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: csc ca1, 0(ca0) ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomic_store_i64_unordered: @@ -1056,12 +1053,9 @@ define void @atomic_store_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomic_store_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_store_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: csc ca1, 0(ca0) ; RV32IAXCHERI-NEXT: cret ; 
; RV64IXCHERI-LABEL: atomic_store_i64_monotonic:
@@ -1095,12 +1089,10 @@ define void @atomic_store_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind {
 ;
 ; RV32IAXCHERI-LABEL: atomic_store_i64_release:
 ; RV32IAXCHERI:       # %bb.0:
-; RV32IAXCHERI-NEXT:    cincoffset csp, csp, -16
-; RV32IAXCHERI-NEXT:    csc cra, 8(csp) # 8-byte Folded Spill
-; RV32IAXCHERI-NEXT:    li a3, 3
-; RV32IAXCHERI-NEXT:    ccall __atomic_store_8
-; RV32IAXCHERI-NEXT:    clc cra, 8(csp) # 8-byte Folded Reload
-; RV32IAXCHERI-NEXT:    cincoffset csp, csp, 16
+; RV32IAXCHERI-NEXT:    fence rw, w
+; RV32IAXCHERI-NEXT:    cincoffset ca1, cnull, a1
+; RV32IAXCHERI-NEXT:    csethigh ca1, ca1, a2
+; RV32IAXCHERI-NEXT:    csc ca1, 0(ca0)
 ; RV32IAXCHERI-NEXT:    cret
 ;
 ; RV64IXCHERI-LABEL: atomic_store_i64_release:
@@ -1135,12 +1127,10 @@ define void @atomic_store_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind {
 ;
 ; RV32IAXCHERI-LABEL: atomic_store_i64_seq_cst:
 ; RV32IAXCHERI:       # %bb.0:
-; RV32IAXCHERI-NEXT:    cincoffset csp, csp, -16
-; RV32IAXCHERI-NEXT:    csc cra, 8(csp) # 8-byte Folded Spill
-; RV32IAXCHERI-NEXT:    li a3, 5
-; RV32IAXCHERI-NEXT:    ccall __atomic_store_8
-; RV32IAXCHERI-NEXT:    clc cra, 8(csp) # 8-byte Folded Reload
-; RV32IAXCHERI-NEXT:    cincoffset csp, csp, 16
+; RV32IAXCHERI-NEXT:    fence rw, w
+; RV32IAXCHERI-NEXT:    cincoffset ca1, cnull, a1
+; RV32IAXCHERI-NEXT:    csethigh ca1, ca1, a2
+; RV32IAXCHERI-NEXT:    csc ca1, 0(ca0)
 ; RV32IAXCHERI-NEXT:    cret
 ;
 ; RV64IXCHERI-LABEL: atomic_store_i64_seq_cst:

From ae50bd673f9ae7d21403536d1d3e5df9be95dd9c Mon Sep 17 00:00:00 2001
From: Alex Richardson
Date: Wed, 20 Sep 2023 18:33:34 -0700
Subject: [PATCH 09/18] [CHERI-RISC-V] Support inline cmpxchg for 2*XLen
 integers

Now that we have an exact bit on cmpxchg, we can lower 2*XLen integer
cmpxchg using a capability operation on the raw bits. While this also
compares the tag bit in addition to the data bits, this should not
matter: IMO, writing a valid capability with the same bit pattern but a
different tag bit to the location should be treated as a conflicting
store.
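
For illustration, this is roughly the IR now emitted for an i64 cmpxchg
in a purecap RV32 function (a sketch distilled from the updated
atomic-cap-size-int.ll expectations below; the %exp.*/%new.* value
names are invented for readability):

  ; materialise the expected value as an untagged capability
  %exp.lo = getelementptr i8, ptr addrspace(200) null, i64 %exp
  %exp.sh = lshr i64 %exp, 32
  %exp.hi = trunc i64 %exp.sh to i32
  %exp.cap = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) %exp.lo, i32 %exp.hi)
  ; %new.cap is built from %new in the same way, then:
  %pair = cmpxchg weak exact ptr addrspace(200) %ptr, ptr addrspace(200) %exp.cap, ptr addrspace(200) %new.cap acq_rel acquire, align 8

The integer result is then reassembled from the returned capability
using llvm.cheri.cap.address.get and llvm.cheri.cap.high.get.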
--- llvm/lib/CodeGen/AtomicExpandPass.cpp | 37 ++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 7 +- .../RISCV32/atomic-cap-size-int.ll | 362 +++++++++++------- .../CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll | 154 +++++--- .../RISCV64/atomic-cap-size-int.ll | 362 +++++++++++------- .../CodeGen/RISCV/cheri/atomic-cmpxchg.ll | 274 ++++++------- 6 files changed, 729 insertions(+), 467 deletions(-) diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp index d514d797736e..0ebe61286eda 100644 --- a/llvm/lib/CodeGen/AtomicExpandPass.cpp +++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp @@ -100,6 +100,7 @@ class AtomicExpand : public FunctionPass { void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI); AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI); + AtomicCmpXchgInst *convertCmpXchgToCapabilityType(AtomicCmpXchgInst *CI); static Value * insertRMWCmpXchgLoop(IRBuilder<> &Builder, Type *ResultType, Value *Addr, Align AddrAlign, AtomicOrdering MemOpOrder, @@ -595,6 +596,39 @@ AtomicExpand::convertAtomicStoreToCapabilityType(llvm::StoreInst *SI) { return NewSI; } +AtomicCmpXchgInst * +AtomicExpand::convertCmpXchgToCapabilityType(llvm::AtomicCmpXchgInst *CI) { + IRBuilder<> Builder(CI); + Type *CapTy; + Value *NewAddr = getCapAddr(CI->getPointerOperand(), &CapTy, Builder, TLI); + const DataLayout &DL = CI->getModule()->getDataLayout(); + Value *NewCmp = + integerToSameSizeCapability(CI->getCompareOperand(), Builder, CapTy, DL); + Value *NewNewVal = + integerToSameSizeCapability(CI->getNewValOperand(), Builder, CapTy, DL); + + auto *NewCI = Builder.CreateAtomicCmpXchg( + NewAddr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(), + CI->getFailureOrdering(), CI->getSyncScopeID()); + NewCI->setVolatile(CI->isVolatile()); + NewCI->setWeak(CI->isWeak()); + // We need to compare all 64/128 bits not just the address. + NewCI->setExactCompare(true); + LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n"); + + Value *OldVal = Builder.CreateExtractValue(NewCI, 0); + Value *Succ = Builder.CreateExtractValue(NewCI, 1); + OldVal = integerFromSameSizeCapability(OldVal, Builder, DL); + + Value *Res = UndefValue::get(CI->getType()); + Res = Builder.CreateInsertValue(Res, OldVal, 0); + Res = Builder.CreateInsertValue(Res, Succ, 1); + + CI->replaceAllUsesWith(Res); + CI->eraseFromParent(); + return NewCI; +} + void AtomicExpand::expandAtomicStore(StoreInst *SI) { // This function is only called on atomic stores that are too large to be // atomic if implemented as a native store. 
So we replace them by an
@@ -1607,6 +1641,9 @@ bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
     expandAtomicCmpXchgToMaskedIntrinsic(CI);
     return true;
+  case TargetLoweringBase::AtomicExpansionKind::CheriCapability:
+    convertCmpXchgToCapabilityType(CI);
+    return true;
   case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
     return lowerAtomicCmpXchgInst(CI);
   }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index bb7fe5a54bc0..39d32f6cbef1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12866,6 +12866,11 @@ RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
   if ((Size == 8 || Size == 16) &&
       !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
     return AtomicExpansionKind::MaskedIntrinsic;
+  if (Subtarget.hasCheri() &&
+      CI->getNewValOperand()->getType()->isIntegerTy(
+          Subtarget.typeForCapabilities().getSizeInBits())) {
+    return AtomicExpansionKind::CheriCapability;
+  }
   return AtomicExpansionKind::None;
 }
@@ -12908,7 +12913,7 @@ bool RISCVTargetLowering::supportsAtomicOperation(const DataLayout &DL,
   // Using capability pointers in hybrid mode is not yet supported for this
   // as we are missing some required patterns.
   if (Subtarget.hasStdExtA() && Subtarget.hasCheri() &&
-      (isa<LoadInst>(AI) || isa<StoreInst>(AI)) &&
+      (isa<LoadInst>(AI) || isa<StoreInst>(AI) || isa<AtomicCmpXchgInst>(AI)) &&
       ValueTy->isIntegerTy(Subtarget.typeForCapabilities().getSizeInBits()) &&
       DL.isFatPointer(PointerTy) == IsPureCapABI)
     return true;
diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
index ba3df0157d67..409b838df530 100644
--- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
+++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
@@ -1342,62 +1342,104 @@ define i64 @atomic_umin(ptr addrspace(200) %ptr, i64 %val) nounwind {
 }

 define { i64, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind {
-; PURECAP-LABEL: cmpxchg_weak:
-; PURECAP:       # %bb.0:
-; PURECAP-NEXT:    cincoffset csp, csp, -32
-; PURECAP-NEXT:    csc cra, 24(csp) # 8-byte Folded Spill
-; PURECAP-NEXT:    csc cs0, 16(csp) # 8-byte Folded Spill
-; PURECAP-NEXT:    mv a6, a5
-; PURECAP-NEXT:    mv a7, a4
-; PURECAP-NEXT:    cmove ct0, ca1
-; PURECAP-NEXT:    cmove cs0, ca0
-; PURECAP-NEXT:    csw a3, 12(csp)
-; PURECAP-NEXT:    csw a2, 8(csp)
-; PURECAP-NEXT:    cincoffset ca0, csp, 8
-; PURECAP-NEXT:    csetbounds ca1, ca0, 8
-; PURECAP-NEXT:    li a4, 4
-; PURECAP-NEXT:    li a5, 2
-; PURECAP-NEXT:    cmove ca0, ct0
-; PURECAP-NEXT:    mv a2, a7
-; PURECAP-NEXT:    mv a3, a6
-; PURECAP-NEXT:    ccall __atomic_compare_exchange_8
-; PURECAP-NEXT:    clw a1, 12(csp)
-; PURECAP-NEXT:    clw a2, 8(csp)
-; PURECAP-NEXT:    csw a1, 4(cs0)
-; PURECAP-NEXT:    csw a2, 0(cs0)
-; PURECAP-NEXT:    csb a0, 8(cs0)
-; PURECAP-NEXT:    clc cra, 24(csp) # 8-byte Folded Reload
-; PURECAP-NEXT:    clc cs0, 16(csp) # 8-byte Folded Reload
-; PURECAP-NEXT:    cincoffset csp, csp, 32
-; PURECAP-NEXT:    cret
+; PURECAP-ATOMICS-LABEL: cmpxchg_weak:
+; PURECAP-ATOMICS:       # %bb.0:
+; PURECAP-ATOMICS-NEXT:    cincoffset ca2, cnull, a2
+; PURECAP-ATOMICS-NEXT:    csethigh ca2, ca2, a3
+; PURECAP-ATOMICS-NEXT:    cincoffset ca3, cnull, a4
+; PURECAP-ATOMICS-NEXT:    csethigh ca3, ca3, a5
+; PURECAP-ATOMICS-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
+; PURECAP-ATOMICS-NEXT:    clr.c.aq ca4, (ca1)
+; PURECAP-ATOMICS-NEXT:    cseqx a5, ca4, ca2
+; PURECAP-ATOMICS-NEXT:    beqz a5, .LBB13_3
+; 
PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aq a5, ca3, (ca1) +; PURECAP-ATOMICS-NEXT: bnez a5, .LBB13_1 +; PURECAP-ATOMICS-NEXT: .LBB13_3: +; PURECAP-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; PURECAP-ATOMICS-NEXT: cgethigh a2, ca4 +; PURECAP-ATOMICS-NEXT: csw a2, 4(ca0) +; PURECAP-ATOMICS-NEXT: csw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: csb a1, 8(ca0) +; PURECAP-ATOMICS-NEXT: cret ; -; HYBRID-LABEL: cmpxchg_weak: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; HYBRID-NEXT: mv a6, a5 -; HYBRID-NEXT: mv a7, a4 -; HYBRID-NEXT: mv t0, a1 -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: sw a3, 4(sp) -; HYBRID-NEXT: sw a2, 0(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a4, 4 -; HYBRID-NEXT: li a5, 2 -; HYBRID-NEXT: mv a0, t0 -; HYBRID-NEXT: mv a2, a7 -; HYBRID-NEXT: mv a3, a6 -; HYBRID-NEXT: call __atomic_compare_exchange_8@plt -; HYBRID-NEXT: lw a1, 4(sp) -; HYBRID-NEXT: lw a2, 0(sp) -; HYBRID-NEXT: sw a1, 4(s0) -; HYBRID-NEXT: sw a2, 0(s0) -; HYBRID-NEXT: sb a0, 8(s0) -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-LIBCALLS-LABEL: cmpxchg_weak: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 24(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv a6, a5 +; PURECAP-LIBCALLS-NEXT: mv a7, a4 +; PURECAP-LIBCALLS-NEXT: cmove ct0, ca1 +; PURECAP-LIBCALLS-NEXT: cmove cs0, ca0 +; PURECAP-LIBCALLS-NEXT: csw a3, 12(csp) +; PURECAP-LIBCALLS-NEXT: csw a2, 8(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 8 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca0, 8 +; PURECAP-LIBCALLS-NEXT: li a4, 4 +; PURECAP-LIBCALLS-NEXT: li a5, 2 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ct0 +; PURECAP-LIBCALLS-NEXT: mv a2, a7 +; PURECAP-LIBCALLS-NEXT: mv a3, a6 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-LIBCALLS-NEXT: clw a1, 12(csp) +; PURECAP-LIBCALLS-NEXT: clw a2, 8(csp) +; PURECAP-LIBCALLS-NEXT: csw a1, 4(cs0) +; PURECAP-LIBCALLS-NEXT: csw a2, 0(cs0) +; PURECAP-LIBCALLS-NEXT: csb a0, 8(cs0) +; PURECAP-LIBCALLS-NEXT: clc cra, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: cmpxchg_weak: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; HYBRID-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; HYBRID-ATOMICS-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: lr.c.aq ca4, (a1) +; HYBRID-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; HYBRID-ATOMICS-NEXT: beqz a5, .LBB13_3 +; HYBRID-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1 +; HYBRID-ATOMICS-NEXT: sc.c.aq a5, ca3, (a1) +; HYBRID-ATOMICS-NEXT: bnez a5, .LBB13_1 +; HYBRID-ATOMICS-NEXT: .LBB13_3: +; HYBRID-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; HYBRID-ATOMICS-NEXT: cgethigh a2, ca4 +; HYBRID-ATOMICS-NEXT: sw a2, 4(a0) +; HYBRID-ATOMICS-NEXT: sw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: sb a1, 8(a0) +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: cmpxchg_weak: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 
12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv a6, a5 +; HYBRID-LIBCALLS-NEXT: mv a7, a4 +; HYBRID-LIBCALLS-NEXT: mv t0, a1 +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: sw a3, 4(sp) +; HYBRID-LIBCALLS-NEXT: sw a2, 0(sp) +; HYBRID-LIBCALLS-NEXT: mv a1, sp +; HYBRID-LIBCALLS-NEXT: li a4, 4 +; HYBRID-LIBCALLS-NEXT: li a5, 2 +; HYBRID-LIBCALLS-NEXT: mv a0, t0 +; HYBRID-LIBCALLS-NEXT: mv a2, a7 +; HYBRID-LIBCALLS-NEXT: mv a3, a6 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-LIBCALLS-NEXT: lw a1, 4(sp) +; HYBRID-LIBCALLS-NEXT: lw a2, 0(sp) +; HYBRID-LIBCALLS-NEXT: sw a1, 4(s0) +; HYBRID-LIBCALLS-NEXT: sw a2, 0(s0) +; HYBRID-LIBCALLS-NEXT: sb a0, 8(s0) +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: cmpxchg_weak: ; HYBRID-CAP-PTR: # %bb.0: @@ -1428,15 +1470,26 @@ define { i64, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i64 %exp, i64 %new) no ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_weak ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[EXP:%.*]], i64 [[NEW:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i64 [[EXP]], ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 4, i32 2) -; PURECAP-IR-NEXT: [[TMP3:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP4:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP3]], 0 -; PURECAP-IR-NEXT: [[TMP5:%.*]] = insertvalue { i64, i1 } [[TMP4]], i1 [[TMP2]], 1 -; PURECAP-IR-NEXT: ret { i64, i1 } [[TMP5]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[EXP]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = lshr i64 [[EXP]], 32 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP1]], i32 [[TMP3]]) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP6:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i32 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP5]], i32 [[TMP7]]) +; PURECAP-IR-NEXT: [[TMP9:%.*]] = cmpxchg weak exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP4]], ptr addrspace(200) [[TMP8]] acq_rel acquire, align 8 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP9]], 0 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP9]], 1 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP10]]) +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP10]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64 +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = shl i64 [[TMP15]], 32 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = or 
i64 [[TMP14]], [[TMP16]] +; PURECAP-IR-NEXT: [[TMP18:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP17]], 0 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } [[TMP18]], i1 [[TMP11]], 1 +; PURECAP-IR-NEXT: ret { i64, i1 } [[TMP19]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_weak ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[EXP:%.*]], i64 [[NEW:%.*]]) #[[ATTR0]] { @@ -1455,62 +1508,104 @@ define { i64, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i64 %exp, i64 %new) no } define { i64, i1 } @cmpxchg_strong(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { -; PURECAP-LABEL: cmpxchg_strong: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -32 -; PURECAP-NEXT: csc cra, 24(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill -; PURECAP-NEXT: mv a6, a5 -; PURECAP-NEXT: mv a7, a4 -; PURECAP-NEXT: cmove ct0, ca1 -; PURECAP-NEXT: cmove cs0, ca0 -; PURECAP-NEXT: csw a3, 12(csp) -; PURECAP-NEXT: csw a2, 8(csp) -; PURECAP-NEXT: cincoffset ca0, csp, 8 -; PURECAP-NEXT: csetbounds ca1, ca0, 8 -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, ct0 -; PURECAP-NEXT: mv a2, a7 -; PURECAP-NEXT: mv a3, a6 -; PURECAP-NEXT: ccall __atomic_compare_exchange_8 -; PURECAP-NEXT: clw a1, 12(csp) -; PURECAP-NEXT: clw a2, 8(csp) -; PURECAP-NEXT: csw a1, 4(cs0) -; PURECAP-NEXT: csw a2, 0(cs0) -; PURECAP-NEXT: csb a0, 8(cs0) -; PURECAP-NEXT: clc cra, 24(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 32 -; PURECAP-NEXT: cret +; PURECAP-ATOMICS-LABEL: cmpxchg_strong: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; PURECAP-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; PURECAP-ATOMICS-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca4, (ca1) +; PURECAP-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; PURECAP-ATOMICS-NEXT: beqz a5, .LBB14_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a5, ca3, (ca1) +; PURECAP-ATOMICS-NEXT: bnez a5, .LBB14_1 +; PURECAP-ATOMICS-NEXT: .LBB14_3: +; PURECAP-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; PURECAP-ATOMICS-NEXT: cgethigh a2, ca4 +; PURECAP-ATOMICS-NEXT: csw a2, 4(ca0) +; PURECAP-ATOMICS-NEXT: csw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: csb a1, 8(ca0) +; PURECAP-ATOMICS-NEXT: cret ; -; HYBRID-LABEL: cmpxchg_strong: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; HYBRID-NEXT: mv a6, a5 -; HYBRID-NEXT: mv a7, a4 -; HYBRID-NEXT: mv t0, a1 -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: sw a3, 4(sp) -; HYBRID-NEXT: sw a2, 0(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, t0 -; HYBRID-NEXT: mv a2, a7 -; HYBRID-NEXT: mv a3, a6 -; HYBRID-NEXT: call __atomic_compare_exchange_8@plt -; HYBRID-NEXT: lw a1, 4(sp) -; HYBRID-NEXT: lw a2, 0(sp) -; HYBRID-NEXT: sw a1, 4(s0) -; HYBRID-NEXT: sw a2, 0(s0) -; HYBRID-NEXT: sb a0, 8(s0) -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-LIBCALLS-LABEL: cmpxchg_strong: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 24(csp) # 8-byte Folded Spill +; 
PURECAP-LIBCALLS-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv a6, a5 +; PURECAP-LIBCALLS-NEXT: mv a7, a4 +; PURECAP-LIBCALLS-NEXT: cmove ct0, ca1 +; PURECAP-LIBCALLS-NEXT: cmove cs0, ca0 +; PURECAP-LIBCALLS-NEXT: csw a3, 12(csp) +; PURECAP-LIBCALLS-NEXT: csw a2, 8(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 8 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca0, 8 +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ct0 +; PURECAP-LIBCALLS-NEXT: mv a2, a7 +; PURECAP-LIBCALLS-NEXT: mv a3, a6 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-LIBCALLS-NEXT: clw a1, 12(csp) +; PURECAP-LIBCALLS-NEXT: clw a2, 8(csp) +; PURECAP-LIBCALLS-NEXT: csw a1, 4(cs0) +; PURECAP-LIBCALLS-NEXT: csw a2, 0(cs0) +; PURECAP-LIBCALLS-NEXT: csb a0, 8(cs0) +; PURECAP-LIBCALLS-NEXT: clc cra, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: cmpxchg_strong: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; HYBRID-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; HYBRID-ATOMICS-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca4, (a1) +; HYBRID-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; HYBRID-ATOMICS-NEXT: beqz a5, .LBB14_3 +; HYBRID-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a5, ca3, (a1) +; HYBRID-ATOMICS-NEXT: bnez a5, .LBB14_1 +; HYBRID-ATOMICS-NEXT: .LBB14_3: +; HYBRID-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; HYBRID-ATOMICS-NEXT: cgethigh a2, ca4 +; HYBRID-ATOMICS-NEXT: sw a2, 4(a0) +; HYBRID-ATOMICS-NEXT: sw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: sb a1, 8(a0) +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: cmpxchg_strong: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv a6, a5 +; HYBRID-LIBCALLS-NEXT: mv a7, a4 +; HYBRID-LIBCALLS-NEXT: mv t0, a1 +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: sw a3, 4(sp) +; HYBRID-LIBCALLS-NEXT: sw a2, 0(sp) +; HYBRID-LIBCALLS-NEXT: mv a1, sp +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, t0 +; HYBRID-LIBCALLS-NEXT: mv a2, a7 +; HYBRID-LIBCALLS-NEXT: mv a3, a6 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-LIBCALLS-NEXT: lw a1, 4(sp) +; HYBRID-LIBCALLS-NEXT: lw a2, 0(sp) +; HYBRID-LIBCALLS-NEXT: sw a1, 4(s0) +; HYBRID-LIBCALLS-NEXT: sw a2, 0(s0) +; HYBRID-LIBCALLS-NEXT: sb a0, 8(s0) +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: cmpxchg_strong: ; HYBRID-CAP-PTR: # %bb.0: @@ -1541,15 +1636,26 @@ define { i64, i1 } @cmpxchg_strong(ptr addrspace(200) %ptr, i64 %exp, i64 %new) ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_strong ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[EXP:%.*]], i64 [[NEW:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr 
addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i64 [[EXP]], ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP3:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP4:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP3]], 0 -; PURECAP-IR-NEXT: [[TMP5:%.*]] = insertvalue { i64, i1 } [[TMP4]], i1 [[TMP2]], 1 -; PURECAP-IR-NEXT: ret { i64, i1 } [[TMP5]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[EXP]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = lshr i64 [[EXP]], 32 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP1]], i32 [[TMP3]]) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP6:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i32 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP5]], i32 [[TMP7]]) +; PURECAP-IR-NEXT: [[TMP9:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP4]], ptr addrspace(200) [[TMP8]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP9]], 0 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP9]], 1 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP10]]) +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP10]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64 +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = shl i64 [[TMP15]], 32 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = or i64 [[TMP14]], [[TMP16]] +; PURECAP-IR-NEXT: [[TMP18:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP17]], 0 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } [[TMP18]], i1 [[TMP11]], 1 +; PURECAP-IR-NEXT: ret { i64, i1 } [[TMP19]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_strong ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[EXP:%.*]], i64 [[NEW:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll index cd7e07361f17..b39179281b55 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-cap-ptr.ll @@ -172,34 +172,55 @@ define { i32, i1 } @test_cmpxchg_strong_i32(ptr addrspace(200) %ptr, i32 %exp, i } define { i64, i1 } @test_cmpxchg_strong_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { -; PURECAP-LABEL: test_cmpxchg_strong_i64: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -32 -; PURECAP-NEXT: csc cra, 24(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill -; PURECAP-NEXT: mv a6, a5 -; PURECAP-NEXT: mv a7, a4 -; PURECAP-NEXT: cmove ct0, ca1 -; PURECAP-NEXT: cmove cs0, ca0 -; PURECAP-NEXT: csw a3, 12(csp) -; PURECAP-NEXT: csw a2, 8(csp) -; PURECAP-NEXT: cincoffset ca0, csp, 8 -; PURECAP-NEXT: csetbounds ca1, ca0, 8 -; PURECAP-NEXT: li a4, 4 -; PURECAP-NEXT: li a5, 2 -; PURECAP-NEXT: cmove ca0, 
ct0 -; PURECAP-NEXT: mv a2, a7 -; PURECAP-NEXT: mv a3, a6 -; PURECAP-NEXT: ccall __atomic_compare_exchange_8 -; PURECAP-NEXT: clw a1, 12(csp) -; PURECAP-NEXT: clw a2, 8(csp) -; PURECAP-NEXT: csw a1, 4(cs0) -; PURECAP-NEXT: csw a2, 0(cs0) -; PURECAP-NEXT: csb a0, 8(cs0) -; PURECAP-NEXT: clc cra, 24(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 32 -; PURECAP-NEXT: cret +; PURECAP-ATOMICS-LABEL: test_cmpxchg_strong_i64: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; PURECAP-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; PURECAP-ATOMICS-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aq ca4, (ca1) +; PURECAP-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; PURECAP-ATOMICS-NEXT: beqz a5, .LBB3_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aq a5, ca3, (ca1) +; PURECAP-ATOMICS-NEXT: bnez a5, .LBB3_1 +; PURECAP-ATOMICS-NEXT: .LBB3_3: +; PURECAP-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; PURECAP-ATOMICS-NEXT: cgethigh a2, ca4 +; PURECAP-ATOMICS-NEXT: csw a2, 4(ca0) +; PURECAP-ATOMICS-NEXT: csw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: csb a1, 8(ca0) +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: test_cmpxchg_strong_i64: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 24(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv a6, a5 +; PURECAP-LIBCALLS-NEXT: mv a7, a4 +; PURECAP-LIBCALLS-NEXT: cmove ct0, ca1 +; PURECAP-LIBCALLS-NEXT: cmove cs0, ca0 +; PURECAP-LIBCALLS-NEXT: csw a3, 12(csp) +; PURECAP-LIBCALLS-NEXT: csw a2, 8(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 8 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca0, 8 +; PURECAP-LIBCALLS-NEXT: li a4, 4 +; PURECAP-LIBCALLS-NEXT: li a5, 2 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ct0 +; PURECAP-LIBCALLS-NEXT: mv a2, a7 +; PURECAP-LIBCALLS-NEXT: mv a3, a6 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-LIBCALLS-NEXT: clw a1, 12(csp) +; PURECAP-LIBCALLS-NEXT: clw a2, 8(csp) +; PURECAP-LIBCALLS-NEXT: csw a1, 4(cs0) +; PURECAP-LIBCALLS-NEXT: csw a2, 0(cs0) +; PURECAP-LIBCALLS-NEXT: csb a0, 8(cs0) +; PURECAP-LIBCALLS-NEXT: clc cra, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret ; ; HYBRID-LABEL: test_cmpxchg_strong_i64: ; HYBRID: # %bb.0: @@ -502,34 +523,55 @@ define { i32, i1 } @test_cmpxchg_weak_i32(ptr addrspace(200) %ptr, i32 %exp, i32 } define { i64, i1 } @test_cmpxchg_weak_i64(ptr addrspace(200) %ptr, i64 %exp, i64 %new) nounwind { -; PURECAP-LABEL: test_cmpxchg_weak_i64: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -32 -; PURECAP-NEXT: csc cra, 24(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill -; PURECAP-NEXT: mv a6, a5 -; PURECAP-NEXT: mv a7, a4 -; PURECAP-NEXT: cmove ct0, ca1 -; PURECAP-NEXT: cmove cs0, ca0 -; PURECAP-NEXT: csw a3, 12(csp) -; PURECAP-NEXT: csw a2, 8(csp) -; PURECAP-NEXT: cincoffset ca0, csp, 8 -; PURECAP-NEXT: csetbounds ca1, ca0, 8 -; PURECAP-NEXT: li a4, 4 -; PURECAP-NEXT: li a5, 2 -; PURECAP-NEXT: cmove ca0, ct0 -; PURECAP-NEXT: mv a2, a7 -; PURECAP-NEXT: mv a3, a6 -; PURECAP-NEXT: ccall __atomic_compare_exchange_8 -; 
PURECAP-NEXT: clw a1, 12(csp) -; PURECAP-NEXT: clw a2, 8(csp) -; PURECAP-NEXT: csw a1, 4(cs0) -; PURECAP-NEXT: csw a2, 0(cs0) -; PURECAP-NEXT: csb a0, 8(cs0) -; PURECAP-NEXT: clc cra, 24(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 32 -; PURECAP-NEXT: cret +; PURECAP-ATOMICS-LABEL: test_cmpxchg_weak_i64: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; PURECAP-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; PURECAP-ATOMICS-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aq ca4, (ca1) +; PURECAP-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; PURECAP-ATOMICS-NEXT: beqz a5, .LBB9_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aq a5, ca3, (ca1) +; PURECAP-ATOMICS-NEXT: bnez a5, .LBB9_1 +; PURECAP-ATOMICS-NEXT: .LBB9_3: +; PURECAP-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; PURECAP-ATOMICS-NEXT: cgethigh a2, ca4 +; PURECAP-ATOMICS-NEXT: csw a2, 4(ca0) +; PURECAP-ATOMICS-NEXT: csw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: csb a1, 8(ca0) +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: test_cmpxchg_weak_i64: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 24(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv a6, a5 +; PURECAP-LIBCALLS-NEXT: mv a7, a4 +; PURECAP-LIBCALLS-NEXT: cmove ct0, ca1 +; PURECAP-LIBCALLS-NEXT: cmove cs0, ca0 +; PURECAP-LIBCALLS-NEXT: csw a3, 12(csp) +; PURECAP-LIBCALLS-NEXT: csw a2, 8(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 8 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca0, 8 +; PURECAP-LIBCALLS-NEXT: li a4, 4 +; PURECAP-LIBCALLS-NEXT: li a5, 2 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ct0 +; PURECAP-LIBCALLS-NEXT: mv a2, a7 +; PURECAP-LIBCALLS-NEXT: mv a3, a6 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-LIBCALLS-NEXT: clw a1, 12(csp) +; PURECAP-LIBCALLS-NEXT: clw a2, 8(csp) +; PURECAP-LIBCALLS-NEXT: csw a1, 4(cs0) +; PURECAP-LIBCALLS-NEXT: csw a2, 0(cs0) +; PURECAP-LIBCALLS-NEXT: csb a0, 8(cs0) +; PURECAP-LIBCALLS-NEXT: clc cra, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 16(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret ; ; HYBRID-LABEL: test_cmpxchg_weak_i64: ; HYBRID: # %bb.0: diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll index 503aa3bb4a8d..80416d25a1bc 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll @@ -1342,62 +1342,104 @@ define i128 @atomic_umin(ptr addrspace(200) %ptr, i128 %val) nounwind { } define { i128, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i128 %exp, i128 %new) nounwind { -; PURECAP-LABEL: cmpxchg_weak: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -48 -; PURECAP-NEXT: csc cra, 32(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill -; PURECAP-NEXT: mv a6, a5 -; PURECAP-NEXT: mv a7, a4 -; PURECAP-NEXT: cmove ct0, ca1 -; PURECAP-NEXT: cmove cs0, ca0 -; PURECAP-NEXT: csd a3, 8(csp) -; PURECAP-NEXT: csd a2, 0(csp) -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds ca1, ca0, 16 -; 
PURECAP-NEXT: li a4, 4 -; PURECAP-NEXT: li a5, 2 -; PURECAP-NEXT: cmove ca0, ct0 -; PURECAP-NEXT: mv a2, a7 -; PURECAP-NEXT: mv a3, a6 -; PURECAP-NEXT: ccall __atomic_compare_exchange_16 -; PURECAP-NEXT: cld a1, 8(csp) -; PURECAP-NEXT: cld a2, 0(csp) -; PURECAP-NEXT: csd a1, 8(cs0) -; PURECAP-NEXT: csd a2, 0(cs0) -; PURECAP-NEXT: csb a0, 16(cs0) -; PURECAP-NEXT: clc cra, 32(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 48 -; PURECAP-NEXT: cret +; PURECAP-ATOMICS-LABEL: cmpxchg_weak: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; PURECAP-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; PURECAP-ATOMICS-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aq ca4, (ca1) +; PURECAP-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; PURECAP-ATOMICS-NEXT: beqz a5, .LBB13_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aq a5, ca3, (ca1) +; PURECAP-ATOMICS-NEXT: bnez a5, .LBB13_1 +; PURECAP-ATOMICS-NEXT: .LBB13_3: +; PURECAP-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; PURECAP-ATOMICS-NEXT: cgethigh a2, ca4 +; PURECAP-ATOMICS-NEXT: csd a2, 8(ca0) +; PURECAP-ATOMICS-NEXT: csd a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: csb a1, 16(ca0) +; PURECAP-ATOMICS-NEXT: cret ; -; HYBRID-LABEL: cmpxchg_weak: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; HYBRID-NEXT: mv a6, a5 -; HYBRID-NEXT: mv a7, a4 -; HYBRID-NEXT: mv t0, a1 -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: sd a3, 8(sp) -; HYBRID-NEXT: sd a2, 0(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a4, 4 -; HYBRID-NEXT: li a5, 2 -; HYBRID-NEXT: mv a0, t0 -; HYBRID-NEXT: mv a2, a7 -; HYBRID-NEXT: mv a3, a6 -; HYBRID-NEXT: call __atomic_compare_exchange_16@plt -; HYBRID-NEXT: ld a1, 8(sp) -; HYBRID-NEXT: ld a2, 0(sp) -; HYBRID-NEXT: sd a1, 8(s0) -; HYBRID-NEXT: sd a2, 0(s0) -; HYBRID-NEXT: sb a0, 16(s0) -; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret +; PURECAP-LIBCALLS-LABEL: cmpxchg_weak: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -48 +; PURECAP-LIBCALLS-NEXT: csc cra, 32(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv a6, a5 +; PURECAP-LIBCALLS-NEXT: mv a7, a4 +; PURECAP-LIBCALLS-NEXT: cmove ct0, ca1 +; PURECAP-LIBCALLS-NEXT: cmove cs0, ca0 +; PURECAP-LIBCALLS-NEXT: csd a3, 8(csp) +; PURECAP-LIBCALLS-NEXT: csd a2, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca0, 16 +; PURECAP-LIBCALLS-NEXT: li a4, 4 +; PURECAP-LIBCALLS-NEXT: li a5, 2 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ct0 +; PURECAP-LIBCALLS-NEXT: mv a2, a7 +; PURECAP-LIBCALLS-NEXT: mv a3, a6 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-LIBCALLS-NEXT: cld a1, 8(csp) +; PURECAP-LIBCALLS-NEXT: cld a2, 0(csp) +; PURECAP-LIBCALLS-NEXT: csd a1, 8(cs0) +; PURECAP-LIBCALLS-NEXT: csd a2, 0(cs0) +; PURECAP-LIBCALLS-NEXT: csb a0, 16(cs0) +; PURECAP-LIBCALLS-NEXT: clc cra, 32(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 48 +; PURECAP-LIBCALLS-NEXT: cret +; +; 
HYBRID-ATOMICS-LABEL: cmpxchg_weak: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; HYBRID-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; HYBRID-ATOMICS-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: lr.c.aq ca4, (a1) +; HYBRID-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; HYBRID-ATOMICS-NEXT: beqz a5, .LBB13_3 +; HYBRID-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1 +; HYBRID-ATOMICS-NEXT: sc.c.aq a5, ca3, (a1) +; HYBRID-ATOMICS-NEXT: bnez a5, .LBB13_1 +; HYBRID-ATOMICS-NEXT: .LBB13_3: +; HYBRID-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; HYBRID-ATOMICS-NEXT: cgethigh a2, ca4 +; HYBRID-ATOMICS-NEXT: sd a2, 8(a0) +; HYBRID-ATOMICS-NEXT: sd a4, 0(a0) +; HYBRID-ATOMICS-NEXT: sb a1, 16(a0) +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: cmpxchg_weak: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-LIBCALLS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv a6, a5 +; HYBRID-LIBCALLS-NEXT: mv a7, a4 +; HYBRID-LIBCALLS-NEXT: mv t0, a1 +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: sd a3, 8(sp) +; HYBRID-LIBCALLS-NEXT: sd a2, 0(sp) +; HYBRID-LIBCALLS-NEXT: mv a1, sp +; HYBRID-LIBCALLS-NEXT: li a4, 4 +; HYBRID-LIBCALLS-NEXT: li a5, 2 +; HYBRID-LIBCALLS-NEXT: mv a0, t0 +; HYBRID-LIBCALLS-NEXT: mv a2, a7 +; HYBRID-LIBCALLS-NEXT: mv a3, a6 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-LIBCALLS-NEXT: ld a1, 8(sp) +; HYBRID-LIBCALLS-NEXT: ld a2, 0(sp) +; HYBRID-LIBCALLS-NEXT: sd a1, 8(s0) +; HYBRID-LIBCALLS-NEXT: sd a2, 0(s0) +; HYBRID-LIBCALLS-NEXT: sb a0, 16(s0) +; HYBRID-LIBCALLS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: cmpxchg_weak: ; HYBRID-CAP-PTR: # %bb.0: @@ -1428,15 +1470,26 @@ define { i128, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i128 %exp, i128 %new) ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_weak ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i128 [[EXP]], ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 4, i32 2) -; PURECAP-IR-NEXT: [[TMP3:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP3]], 0 -; PURECAP-IR-NEXT: [[TMP5:%.*]] = insertvalue { i128, i1 } [[TMP4]], i1 [[TMP2]], 1 -; PURECAP-IR-NEXT: ret { i128, i1 } [[TMP5]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[EXP]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = lshr i128 [[EXP]], 64 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP1]], i64 [[TMP3]]) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr addrspace(200) 
null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP6:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = trunc i128 [[TMP6]] to i64 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP5]], i64 [[TMP7]]) +; PURECAP-IR-NEXT: [[TMP9:%.*]] = cmpxchg weak exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP4]], ptr addrspace(200) [[TMP8]] acq_rel acquire, align 16 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP9]], 0 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP9]], 1 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP10]]) +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP10]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = zext i64 [[TMP12]] to i128 +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = shl i128 [[TMP15]], 64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = or i128 [[TMP14]], [[TMP16]] +; PURECAP-IR-NEXT: [[TMP18:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP17]], 0 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } [[TMP18]], i1 [[TMP11]], 1 +; PURECAP-IR-NEXT: ret { i128, i1 } [[TMP19]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_weak ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) #[[ATTR0]] { @@ -1455,62 +1508,104 @@ define { i128, i1 } @cmpxchg_weak(ptr addrspace(200) %ptr, i128 %exp, i128 %new) } define { i128, i1 } @cmpxchg_strong(ptr addrspace(200) %ptr, i128 %exp, i128 %new) nounwind { -; PURECAP-LABEL: cmpxchg_strong: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -48 -; PURECAP-NEXT: csc cra, 32(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill -; PURECAP-NEXT: mv a6, a5 -; PURECAP-NEXT: mv a7, a4 -; PURECAP-NEXT: cmove ct0, ca1 -; PURECAP-NEXT: cmove cs0, ca0 -; PURECAP-NEXT: csd a3, 8(csp) -; PURECAP-NEXT: csd a2, 0(csp) -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds ca1, ca0, 16 -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, ct0 -; PURECAP-NEXT: mv a2, a7 -; PURECAP-NEXT: mv a3, a6 -; PURECAP-NEXT: ccall __atomic_compare_exchange_16 -; PURECAP-NEXT: cld a1, 8(csp) -; PURECAP-NEXT: cld a2, 0(csp) -; PURECAP-NEXT: csd a1, 8(cs0) -; PURECAP-NEXT: csd a2, 0(cs0) -; PURECAP-NEXT: csb a0, 16(cs0) -; PURECAP-NEXT: clc cra, 32(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 48 -; PURECAP-NEXT: cret +; PURECAP-ATOMICS-LABEL: cmpxchg_strong: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; PURECAP-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; PURECAP-ATOMICS-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca4, (ca1) +; PURECAP-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; PURECAP-ATOMICS-NEXT: beqz a5, .LBB14_3 +; PURECAP-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a5, ca3, (ca1) +; PURECAP-ATOMICS-NEXT: bnez a5, .LBB14_1 +; PURECAP-ATOMICS-NEXT: .LBB14_3: +; PURECAP-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; PURECAP-ATOMICS-NEXT: cgethigh a2, ca4 +; PURECAP-ATOMICS-NEXT: csd a2, 8(ca0) +; PURECAP-ATOMICS-NEXT: csd a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: csb a1, 16(ca0) +; PURECAP-ATOMICS-NEXT: cret 
; -; HYBRID-LABEL: cmpxchg_strong: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; HYBRID-NEXT: mv a6, a5 -; HYBRID-NEXT: mv a7, a4 -; HYBRID-NEXT: mv t0, a1 -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: sd a3, 8(sp) -; HYBRID-NEXT: sd a2, 0(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, t0 -; HYBRID-NEXT: mv a2, a7 -; HYBRID-NEXT: mv a3, a6 -; HYBRID-NEXT: call __atomic_compare_exchange_16@plt -; HYBRID-NEXT: ld a1, 8(sp) -; HYBRID-NEXT: ld a2, 0(sp) -; HYBRID-NEXT: sd a1, 8(s0) -; HYBRID-NEXT: sd a2, 0(s0) -; HYBRID-NEXT: sb a0, 16(s0) -; HYBRID-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret +; PURECAP-LIBCALLS-LABEL: cmpxchg_strong: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -48 +; PURECAP-LIBCALLS-NEXT: csc cra, 32(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv a6, a5 +; PURECAP-LIBCALLS-NEXT: mv a7, a4 +; PURECAP-LIBCALLS-NEXT: cmove ct0, ca1 +; PURECAP-LIBCALLS-NEXT: cmove cs0, ca0 +; PURECAP-LIBCALLS-NEXT: csd a3, 8(csp) +; PURECAP-LIBCALLS-NEXT: csd a2, 0(csp) +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds ca1, ca0, 16 +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, ct0 +; PURECAP-LIBCALLS-NEXT: mv a2, a7 +; PURECAP-LIBCALLS-NEXT: mv a3, a6 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-LIBCALLS-NEXT: cld a1, 8(csp) +; PURECAP-LIBCALLS-NEXT: cld a2, 0(csp) +; PURECAP-LIBCALLS-NEXT: csd a1, 8(cs0) +; PURECAP-LIBCALLS-NEXT: csd a2, 0(cs0) +; PURECAP-LIBCALLS-NEXT: csb a0, 16(cs0) +; PURECAP-LIBCALLS-NEXT: clc cra, 32(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 48 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: cmpxchg_strong: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: cincoffset ca2, cnull, a2 +; HYBRID-ATOMICS-NEXT: csethigh ca2, ca2, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca3, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca3, a5 +; HYBRID-ATOMICS-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca4, (a1) +; HYBRID-ATOMICS-NEXT: cseqx a5, ca4, ca2 +; HYBRID-ATOMICS-NEXT: beqz a5, .LBB14_3 +; HYBRID-ATOMICS-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a5, ca3, (a1) +; HYBRID-ATOMICS-NEXT: bnez a5, .LBB14_1 +; HYBRID-ATOMICS-NEXT: .LBB14_3: +; HYBRID-ATOMICS-NEXT: cseqx a1, ca4, ca2 +; HYBRID-ATOMICS-NEXT: cgethigh a2, ca4 +; HYBRID-ATOMICS-NEXT: sd a2, 8(a0) +; HYBRID-ATOMICS-NEXT: sd a4, 0(a0) +; HYBRID-ATOMICS-NEXT: sb a1, 16(a0) +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: cmpxchg_strong: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-LIBCALLS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv a6, a5 +; HYBRID-LIBCALLS-NEXT: mv a7, a4 +; HYBRID-LIBCALLS-NEXT: mv t0, a1 +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: sd a3, 8(sp) +; HYBRID-LIBCALLS-NEXT: sd a2, 0(sp) +; HYBRID-LIBCALLS-NEXT: mv a1, sp +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 
5 +; HYBRID-LIBCALLS-NEXT: mv a0, t0 +; HYBRID-LIBCALLS-NEXT: mv a2, a7 +; HYBRID-LIBCALLS-NEXT: mv a3, a6 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-LIBCALLS-NEXT: ld a1, 8(sp) +; HYBRID-LIBCALLS-NEXT: ld a2, 0(sp) +; HYBRID-LIBCALLS-NEXT: sd a1, 8(s0) +; HYBRID-LIBCALLS-NEXT: sd a2, 0(s0) +; HYBRID-LIBCALLS-NEXT: sb a0, 16(s0) +; HYBRID-LIBCALLS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: cmpxchg_strong: ; HYBRID-CAP-PTR: # %bb.0: @@ -1541,15 +1636,26 @@ define { i128, i1 } @cmpxchg_strong(ptr addrspace(200) %ptr, i128 %exp, i128 %ne ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@cmpxchg_strong ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 [[NEW:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i128 [[EXP]], ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP3:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP3]], 0 -; PURECAP-IR-NEXT: [[TMP5:%.*]] = insertvalue { i128, i1 } [[TMP4]], i1 [[TMP2]], 1 -; PURECAP-IR-NEXT: ret { i128, i1 } [[TMP5]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[EXP]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = lshr i128 [[EXP]], 64 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP1]], i64 [[TMP3]]) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP6:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP7:%.*]] = trunc i128 [[TMP6]] to i64 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP5]], i64 [[TMP7]]) +; PURECAP-IR-NEXT: [[TMP9:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP4]], ptr addrspace(200) [[TMP8]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP9]], 0 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP9]], 1 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP10]]) +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP10]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = zext i64 [[TMP12]] to i128 +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = shl i128 [[TMP15]], 64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = or i128 [[TMP14]], [[TMP16]] +; PURECAP-IR-NEXT: [[TMP18:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP17]], 0 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } [[TMP18]], i1 [[TMP11]], 1 +; PURECAP-IR-NEXT: ret { i128, i1 } [[TMP19]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@cmpxchg_strong ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[EXP:%.*]], i128 
[[NEW:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/RISCV/cheri/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/cheri/atomic-cmpxchg.ll index ad3c047e691a..936e6d89361f 100644 --- a/llvm/test/CodeGen/RISCV/cheri/atomic-cmpxchg.ll +++ b/llvm/test/CodeGen/RISCV/cheri/atomic-cmpxchg.ll @@ -1779,20 +1779,18 @@ define void @cmpxchg_i64_monotonic_monotonic(i64 addrspace(200)* %ptr, i64 %cmp, ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_monotonic_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cincoffset ca5, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca5, ca5, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: cmove ca1, ca5 -; RV32IAXCHERI-NEXT: mv a2, a3 -; RV32IAXCHERI-NEXT: mv a3, a4 -; RV32IAXCHERI-NEXT: li a4, 0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 +; RV32IAXCHERI-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB30_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB30_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB30_1 +; RV32IAXCHERI-NEXT: .LBB30_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_monotonic_monotonic: @@ -1846,21 +1844,18 @@ define void @cmpxchg_i64_acquire_monotonic(i64 addrspace(200)* %ptr, i64 %cmp, i ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_acquire_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: cincoffset ca4, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca5, ca4, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 2 -; RV32IAXCHERI-NEXT: cmove ca1, ca5 -; RV32IAXCHERI-NEXT: mv a2, a3 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 +; RV32IAXCHERI-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c.aq ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB31_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB31_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c.aq a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB31_1 +; RV32IAXCHERI-NEXT: .LBB31_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_acquire_monotonic: @@ -1915,22 +1910,18 @@ define void @cmpxchg_i64_acquire_acquire(i64 addrspace(200)* %ptr, i64 %cmp, i64 ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_acquire_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: mv a7, a3 -; RV32IAXCHERI-NEXT: cincoffset ca3, csp, 0 -; 
RV32IAXCHERI-NEXT: csetbounds ca3, ca3, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 2 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca1, ca3 -; RV32IAXCHERI-NEXT: mv a2, a7 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 +; RV32IAXCHERI-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c.aq ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB32_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB32_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c.aq a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB32_1 +; RV32IAXCHERI-NEXT: .LBB32_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_acquire_acquire: @@ -1984,21 +1975,18 @@ define void @cmpxchg_i64_release_monotonic(i64 addrspace(200)* %ptr, i64 %cmp, i ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_release_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: cincoffset ca4, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca5, ca4, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 3 -; RV32IAXCHERI-NEXT: cmove ca1, ca5 -; RV32IAXCHERI-NEXT: mv a2, a3 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 +; RV32IAXCHERI-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c.rl ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB33_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB33_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB33_1 +; RV32IAXCHERI-NEXT: .LBB33_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_release_monotonic: @@ -2053,22 +2041,18 @@ define void @cmpxchg_i64_release_acquire(i64 addrspace(200)* %ptr, i64 %cmp, i64 ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_release_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: mv a7, a3 -; RV32IAXCHERI-NEXT: cincoffset ca3, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca3, ca3, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 3 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca1, ca3 -; RV32IAXCHERI-NEXT: mv a2, a7 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 
+; RV32IAXCHERI-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c.aq ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB34_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB34_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c.aq a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB34_1 +; RV32IAXCHERI-NEXT: .LBB34_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_release_acquire: @@ -2122,21 +2106,18 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64 addrspace(200)* %ptr, i64 %cmp, i ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_acq_rel_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: cincoffset ca4, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca5, ca4, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 4 -; RV32IAXCHERI-NEXT: cmove ca1, ca5 -; RV32IAXCHERI-NEXT: mv a2, a3 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 +; RV32IAXCHERI-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c.aq ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB35_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c.aq a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB35_1 +; RV32IAXCHERI-NEXT: .LBB35_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_acq_rel_monotonic: @@ -2191,22 +2172,18 @@ define void @cmpxchg_i64_acq_rel_acquire(i64 addrspace(200)* %ptr, i64 %cmp, i64 ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_acq_rel_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: mv a7, a3 -; RV32IAXCHERI-NEXT: cincoffset ca3, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca3, ca3, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 4 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca1, ca3 -; RV32IAXCHERI-NEXT: mv a2, a7 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 +; RV32IAXCHERI-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c.aq ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB36_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c.aq a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB36_1 +; RV32IAXCHERI-NEXT: .LBB36_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_acq_rel_acquire: @@ -2260,21 +2237,18 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64 addrspace(200)* %ptr, i64 %cmp, i ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_seq_cst_monotonic: ; 
RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: cincoffset ca4, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca5, ca4, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 5 -; RV32IAXCHERI-NEXT: cmove ca1, ca5 -; RV32IAXCHERI-NEXT: mv a2, a3 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 +; RV32IAXCHERI-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB37_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c.aqrl a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB37_1 +; RV32IAXCHERI-NEXT: .LBB37_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_seq_cst_monotonic: @@ -2329,22 +2303,18 @@ define void @cmpxchg_i64_seq_cst_acquire(i64 addrspace(200)* %ptr, i64 %cmp, i64 ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_seq_cst_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: mv a7, a3 -; RV32IAXCHERI-NEXT: cincoffset ca3, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca3, ca3, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 5 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca1, ca3 -; RV32IAXCHERI-NEXT: mv a2, a7 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3 +; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4 +; RV32IAXCHERI-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca3, (ca0) +; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1 +; RV32IAXCHERI-NEXT: beqz a4, .LBB38_3 +; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1 +; RV32IAXCHERI-NEXT: csc.c.aqrl a4, ca2, (ca0) +; RV32IAXCHERI-NEXT: bnez a4, .LBB38_1 +; RV32IAXCHERI-NEXT: .LBB38_3: ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: cmpxchg_i64_seq_cst_acquire: @@ -2399,22 +2369,18 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64 addrspace(200)* %ptr, i64 %cmp, i64 ; ; RV32IAXCHERI-LABEL: cmpxchg_i64_seq_cst_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: mv a6, a4 -; RV32IAXCHERI-NEXT: mv a7, a3 -; RV32IAXCHERI-NEXT: cincoffset ca3, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds ca3, ca3, 8 -; RV32IAXCHERI-NEXT: csw a2, 4(csp) -; RV32IAXCHERI-NEXT: csw a1, 0(csp) -; RV32IAXCHERI-NEXT: li a4, 5 -; RV32IAXCHERI-NEXT: li a5, 5 -; RV32IAXCHERI-NEXT: cmove ca1, ca3 -; RV32IAXCHERI-NEXT: mv a2, a7 -; RV32IAXCHERI-NEXT: mv a3, a6 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 
8-byte Folded Reload
-; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16
+; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1
+; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2
+; RV32IAXCHERI-NEXT: cincoffset ca2, cnull, a3
+; RV32IAXCHERI-NEXT: csethigh ca2, ca2, a4
+; RV32IAXCHERI-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1
+; RV32IAXCHERI-NEXT: clr.c.aqrl ca3, (ca0)
+; RV32IAXCHERI-NEXT: cseqx a4, ca3, ca1
+; RV32IAXCHERI-NEXT: beqz a4, .LBB39_3
+; RV32IAXCHERI-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1
+; RV32IAXCHERI-NEXT: csc.c.aqrl a4, ca2, (ca0)
+; RV32IAXCHERI-NEXT: bnez a4, .LBB39_1
+; RV32IAXCHERI-NEXT: .LBB39_3:
 ; RV32IAXCHERI-NEXT: cret
 ;
 ; RV64IXCHERI-LABEL: cmpxchg_i64_seq_cst_seq_cst:

From f736e097fbb8963a21f21fa84995a1972f00afea Mon Sep 17 00:00:00 2001
From: Alex Richardson
Date: Wed, 20 Sep 2023 19:24:12 -0700
Subject: [PATCH 10/18] [CHERI-RISC-V] Support inline atomic RMW for 2*XLen
 integers

This completes the set of atomic operations on i128/i64 and will allow
us to return true for `__atomic_always_lock_free(sizeof(__intcap))`.
---
 llvm/lib/CodeGen/AtomicExpandPass.cpp | 7 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 5 +-
 .../RISCV32/atomic-cap-size-int.ll | 2439 +++++++++++----
 .../RISCV64/atomic-cap-size-int.ll | 2439 +++++++++++----
 llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll | 2700 +++++++++++------
 5 files changed, 5307 insertions(+), 2283 deletions(-)

diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 0ebe61286eda..524402369c8e 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -326,6 +326,13 @@ bool AtomicExpand::runOnFunction(Function &F) {
       MadeChange |= tryExpandAtomicCmpXchg(CASI);
     }
   }
+  if (MadeChange) {
+    // If we expanded an instruction, the expanded form may itself need
+    // expanding. This can happen when expanding an RMW to a cmpxchg that
+    // also needs expanding.
+    // TODO: assert that we don't recurse more than once or use a worklist?
+    runOnFunction(F);
+  }
   return MadeChange;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 39d32f6cbef1..0da520d5c385 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12761,6 +12761,10 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   if ((Size == 8 || Size == 16) &&
       !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
     return AtomicExpansionKind::MaskedIntrinsic;
+  if (Subtarget.hasCheri() &&
+      Size == Subtarget.typeForCapabilities().getSizeInBits()) {
+    return AtomicExpansionKind::CmpXChg;
+  }
   return AtomicExpansionKind::None;
 }
@@ -12913,7 +12917,6 @@ bool RISCVTargetLowering::supportsAtomicOperation(const DataLayout &DL,
   // Using capability pointers in hybrid mode is not yet supported for this
   // as we are missing some required patterns.
if (Subtarget.hasStdExtA() && Subtarget.hasCheri() && - (isa(AI) || isa(AI) || isa(AI)) && ValueTy->isIntegerTy(Subtarget.typeForCapabilities().getSizeInBits()) && DL.isFatPointer(PointerTy) == IsPureCapABI) return true; diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll index 409b838df530..1ba10fde024e 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll @@ -176,25 +176,89 @@ define i64 @load(ptr addrspace(200) %ptr) nounwind { } define i64 @atomic_xchg(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_xchg: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_exchange_8 -; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_xchg: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_exchange_8@plt -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_xchg: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB2_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB2_3 Depth 2 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a1 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a2 +; PURECAP-ATOMICS-NEXT: .LBB2_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB2_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB2_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB2_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB2_3 +; PURECAP-ATOMICS-NEXT: .LBB2_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB2_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB2_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_xchg: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_exchange_8 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_xchg: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB2_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB2_3 Depth 2 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; 
HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a1 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a2 +; HYBRID-ATOMICS-NEXT: .LBB2_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB2_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB2_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB2_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB2_3 +; HYBRID-ATOMICS-NEXT: .LBB2_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB2_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB2_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_xchg: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_exchange_8@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_xchg: ; HYBRID-CAP-PTR: # %bb.0: @@ -207,8 +271,34 @@ define i64 @atomic_xchg(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xchg ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_exchange_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP2]], i32 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[VAL]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i64 [[VAL]], 32 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP6]], i32 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 
+; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 32 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i64 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xchg ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { @@ -220,25 +310,97 @@ define i64 @atomic_xchg(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_add(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_add: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_add_8 -; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_add: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_add_8@plt -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_add: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB3_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB3_3 Depth 2 +; PURECAP-ATOMICS-NEXT: add a5, a4, a1 +; PURECAP-ATOMICS-NEXT: sltu a6, a5, a4 +; PURECAP-ATOMICS-NEXT: add a7, a3, a2 +; PURECAP-ATOMICS-NEXT: add a6, a7, a6 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a5 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a6 +; PURECAP-ATOMICS-NEXT: .LBB3_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB3_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB3_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB3_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB3_3 +; PURECAP-ATOMICS-NEXT: .LBB3_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB3_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB3_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_add: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_add_8 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; 
PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_add: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB3_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB3_3 Depth 2 +; HYBRID-ATOMICS-NEXT: add a5, a4, a1 +; HYBRID-ATOMICS-NEXT: sltu a6, a5, a4 +; HYBRID-ATOMICS-NEXT: add a7, a3, a2 +; HYBRID-ATOMICS-NEXT: add a6, a7, a6 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a5 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a6 +; HYBRID-ATOMICS-NEXT: .LBB3_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB3_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB3_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB3_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB3_3 +; HYBRID-ATOMICS-NEXT: .LBB3_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB3_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB3_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_add: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_add_8@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_add: ; HYBRID-CAP-PTR: # %bb.0: @@ -251,8 +413,35 @@ define i64 @atomic_add(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_add ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_add_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = add i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP2]], i32 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP6]], i32 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], 
ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 32 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i64 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_add ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { @@ -264,25 +453,97 @@ define i64 @atomic_add(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_sub(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_sub: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_sub_8 -; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_sub: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_sub_8@plt -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_sub: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB4_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB4_3 Depth 2 +; PURECAP-ATOMICS-NEXT: sltu a5, a4, a1 +; PURECAP-ATOMICS-NEXT: sub a6, a3, a2 +; PURECAP-ATOMICS-NEXT: sub a5, a6, a5 +; PURECAP-ATOMICS-NEXT: sub a6, a4, a1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB4_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB4_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB4_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB4_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB4_3 +; PURECAP-ATOMICS-NEXT: .LBB4_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB4_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; 
PURECAP-ATOMICS-NEXT: beqz a6, .LBB4_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_sub: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_sub_8 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_sub: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB4_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB4_3 Depth 2 +; HYBRID-ATOMICS-NEXT: sltu a5, a4, a1 +; HYBRID-ATOMICS-NEXT: sub a6, a3, a2 +; HYBRID-ATOMICS-NEXT: sub a5, a6, a5 +; HYBRID-ATOMICS-NEXT: sub a6, a4, a1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB4_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB4_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB4_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB4_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB4_3 +; HYBRID-ATOMICS-NEXT: .LBB4_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB4_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB4_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_sub: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_sub_8@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_sub: ; HYBRID-CAP-PTR: # %bb.0: @@ -295,8 +556,35 @@ define i64 @atomic_sub(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_sub ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_sub_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: 
[[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP2]], i32 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP6]], i32 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 32 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i64 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_sub ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { @@ -308,25 +596,93 @@ define i64 @atomic_sub(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_and(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_and: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_and_8 -; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_and: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_and_8@plt -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_and: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB5_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB5_3 Depth 2 +; PURECAP-ATOMICS-NEXT: and a5, a3, a2 +; PURECAP-ATOMICS-NEXT: and a6, a4, a1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB5_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB5_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; 
PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB5_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB5_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB5_3 +; PURECAP-ATOMICS-NEXT: .LBB5_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB5_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB5_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_and: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_and_8 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_and: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB5_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB5_3 Depth 2 +; HYBRID-ATOMICS-NEXT: and a5, a3, a2 +; HYBRID-ATOMICS-NEXT: and a6, a4, a1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB5_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB5_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB5_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB5_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB5_3 +; HYBRID-ATOMICS-NEXT: .LBB5_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB5_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB5_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_and: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_and_8@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_and: ; HYBRID-CAP-PTR: # %bb.0: @@ -339,8 +695,35 @@ define i64 @atomic_and(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_and ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_and_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 
8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP2]], i32 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP6]], i32 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 32 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i64 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_and ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { @@ -352,25 +735,97 @@ define i64 @atomic_and(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_nand(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_nand: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_nand_8 -; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_nand: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_nand_8@plt -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_nand: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB6_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB6_3 Depth 2 +; PURECAP-ATOMICS-NEXT: and a5, a4, a1 +; PURECAP-ATOMICS-NEXT: and 
a6, a3, a2 +; PURECAP-ATOMICS-NEXT: not a6, a6 +; PURECAP-ATOMICS-NEXT: not a5, a5 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a5 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a6 +; PURECAP-ATOMICS-NEXT: .LBB6_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB6_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB6_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB6_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB6_3 +; PURECAP-ATOMICS-NEXT: .LBB6_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB6_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB6_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_nand: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_nand_8 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_nand: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB6_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB6_3 Depth 2 +; HYBRID-ATOMICS-NEXT: and a5, a4, a1 +; HYBRID-ATOMICS-NEXT: and a6, a3, a2 +; HYBRID-ATOMICS-NEXT: not a6, a6 +; HYBRID-ATOMICS-NEXT: not a5, a5 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a5 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a6 +; HYBRID-ATOMICS-NEXT: .LBB6_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB6_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB6_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB6_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB6_3 +; HYBRID-ATOMICS-NEXT: .LBB6_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB6_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB6_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_nand: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_nand_8@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; 
HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_nand: ; HYBRID-CAP-PTR: # %bb.0: @@ -383,8 +838,36 @@ define i64 @atomic_nand(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_nand ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_nand_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = and i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = xor i64 [[TMP2]], -1 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP3]], i32 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP7]], i32 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i32 [[TMP15]] to i64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 32 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i64 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i64, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP21]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_nand ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { @@ -396,25 +879,93 @@ define i64 @atomic_nand(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_or(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_or: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_or_8 -; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_or: -; HYBRID: # %bb.0: 
-; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_or_8@plt -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_or: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB7_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB7_3 Depth 2 +; PURECAP-ATOMICS-NEXT: or a5, a3, a2 +; PURECAP-ATOMICS-NEXT: or a6, a4, a1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB7_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB7_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB7_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB7_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB7_3 +; PURECAP-ATOMICS-NEXT: .LBB7_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB7_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB7_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_or: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_or_8 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_or: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB7_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB7_3 Depth 2 +; HYBRID-ATOMICS-NEXT: or a5, a3, a2 +; HYBRID-ATOMICS-NEXT: or a6, a4, a1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB7_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB7_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB7_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB7_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB7_3 +; HYBRID-ATOMICS-NEXT: .LBB7_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB7_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB7_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # 
%atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_or: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_or_8@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_or: ; HYBRID-CAP-PTR: # %bb.0: @@ -427,8 +978,35 @@ define i64 @atomic_or(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_or ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_or_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP2]], i32 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP6]], i32 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 32 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i64 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_or ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { @@ -440,25 +1018,93 @@ define i64 @atomic_or(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_xor(ptr addrspace(200) %ptr, 
i64 %val) nounwind { -; PURECAP-LABEL: atomic_xor: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_xor_8 -; PURECAP-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_xor: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_xor_8@plt -; HYBRID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_xor: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB8_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB8_3 Depth 2 +; PURECAP-ATOMICS-NEXT: xor a5, a3, a2 +; PURECAP-ATOMICS-NEXT: xor a6, a4, a1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB8_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB8_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB8_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB8_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB8_3 +; PURECAP-ATOMICS-NEXT: .LBB8_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB8_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB8_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_xor: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_xor_8 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_xor: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB8_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB8_3 Depth 2 +; HYBRID-ATOMICS-NEXT: xor a5, a3, a2 +; HYBRID-ATOMICS-NEXT: xor a6, a4, a1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB8_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB8_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB8_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: 
Header=BB8_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB8_3 +; HYBRID-ATOMICS-NEXT: .LBB8_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB8_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB8_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_xor: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_xor_8@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_xor: ; HYBRID-CAP-PTR: # %bb.0: @@ -471,8 +1117,35 @@ define i64 @atomic_xor(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xor ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_fetch_xor_8(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i64 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP2]], i32 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP6]], i32 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 32 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i64 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP20]], 0 +; 
PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xor ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { @@ -484,117 +1157,221 @@ define i64 @atomic_xor(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_max(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_max: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -48 -; PURECAP-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: cmove cs3, ca0 -; PURECAP-NEXT: clw a5, 4(ca0) -; PURECAP-NEXT: clw a4, 0(ca0) -; PURECAP-NEXT: mv s1, a2 -; PURECAP-NEXT: mv s2, a1 -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds cs0, ca0, 8 -; PURECAP-NEXT: j .LBB9_2 -; PURECAP-NEXT: .LBB9_1: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: csw a4, 0(csp) -; PURECAP-NEXT: csw a5, 4(csp) -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, cs3 -; PURECAP-NEXT: cmove ca1, cs0 -; PURECAP-NEXT: ccall __atomic_compare_exchange_8 -; PURECAP-NEXT: clw a5, 4(csp) -; PURECAP-NEXT: clw a4, 0(csp) -; PURECAP-NEXT: bnez a0, .LBB9_7 -; PURECAP-NEXT: .LBB9_2: # %atomicrmw.start -; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: beq a5, s1, .LBB9_4 -; PURECAP-NEXT: # %bb.3: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: slt a0, s1, a5 -; PURECAP-NEXT: j .LBB9_5 -; PURECAP-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: sltu a0, s2, a4 -; PURECAP-NEXT: .LBB9_5: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: mv a2, a4 -; PURECAP-NEXT: mv a3, a5 -; PURECAP-NEXT: bnez a0, .LBB9_1 -; PURECAP-NEXT: # %bb.6: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: mv a2, s2 -; PURECAP-NEXT: mv a3, s1 -; PURECAP-NEXT: j .LBB9_1 -; PURECAP-NEXT: .LBB9_7: # %atomicrmw.end -; PURECAP-NEXT: mv a0, a4 -; PURECAP-NEXT: mv a1, a5 -; PURECAP-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 48 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_max: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: lw a5, 4(a0) -; HYBRID-NEXT: lw a4, 0(a0) -; HYBRID-NEXT: mv s1, a2 -; HYBRID-NEXT: mv s2, a1 -; HYBRID-NEXT: j .LBB9_2 -; HYBRID-NEXT: .LBB9_1: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: sw a4, 8(sp) -; HYBRID-NEXT: sw a5, 12(sp) -; HYBRID-NEXT: addi a1, sp, 8 -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, s0 -; HYBRID-NEXT: call __atomic_compare_exchange_8@plt -; HYBRID-NEXT: lw a5, 12(sp) -; HYBRID-NEXT: lw a4, 8(sp) -; HYBRID-NEXT: bnez a0, .LBB9_7 -; 
HYBRID-NEXT: .LBB9_2: # %atomicrmw.start -; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: beq a5, s1, .LBB9_4 -; HYBRID-NEXT: # %bb.3: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: slt a0, s1, a5 -; HYBRID-NEXT: j .LBB9_5 -; HYBRID-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: sltu a0, s2, a4 -; HYBRID-NEXT: .LBB9_5: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: mv a2, a4 -; HYBRID-NEXT: mv a3, a5 -; HYBRID-NEXT: bnez a0, .LBB9_1 -; HYBRID-NEXT: # %bb.6: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: mv a2, s2 -; HYBRID-NEXT: mv a3, s1 -; HYBRID-NEXT: j .LBB9_1 -; HYBRID-NEXT: .LBB9_7: # %atomicrmw.end -; HYBRID-NEXT: mv a0, a4 -; HYBRID-NEXT: mv a1, a5 -; HYBRID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_max: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: j .LBB9_2 +; PURECAP-ATOMICS-NEXT: .LBB9_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB9_8: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB9_10 +; PURECAP-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_8 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB9_8 +; PURECAP-ATOMICS-NEXT: .LBB9_10: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB9_7 +; PURECAP-ATOMICS-NEXT: .LBB9_2: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB9_8 Depth 2 +; PURECAP-ATOMICS-NEXT: beq a3, a2, .LBB9_4 +; PURECAP-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: slt a6, a2, a3 +; PURECAP-ATOMICS-NEXT: j .LBB9_5 +; PURECAP-ATOMICS-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a6, a1, a4 +; PURECAP-ATOMICS-NEXT: .LBB9_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a3 +; PURECAP-ATOMICS-NEXT: mv a7, a4 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB9_1 +; PURECAP-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a2 +; PURECAP-ATOMICS-NEXT: mv a7, a1 +; PURECAP-ATOMICS-NEXT: j .LBB9_1 +; PURECAP-ATOMICS-NEXT: .LBB9_7: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_max: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -48 +; 
PURECAP-LIBCALLS-NEXT: csc cra, 40(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs3, ca0 +; PURECAP-LIBCALLS-NEXT: clw a5, 4(ca0) +; PURECAP-LIBCALLS-NEXT: clw a4, 0(ca0) +; PURECAP-LIBCALLS-NEXT: mv s1, a2 +; PURECAP-LIBCALLS-NEXT: mv s2, a1 +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds cs0, ca0, 8 +; PURECAP-LIBCALLS-NEXT: j .LBB9_2 +; PURECAP-LIBCALLS-NEXT: .LBB9_1: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: csw a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: csw a5, 4(csp) +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs3 +; PURECAP-LIBCALLS-NEXT: cmove ca1, cs0 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-LIBCALLS-NEXT: clw a5, 4(csp) +; PURECAP-LIBCALLS-NEXT: clw a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB9_7 +; PURECAP-LIBCALLS-NEXT: .LBB9_2: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-LIBCALLS-NEXT: beq a5, s1, .LBB9_4 +; PURECAP-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: slt a0, s1, a5 +; PURECAP-LIBCALLS-NEXT: j .LBB9_5 +; PURECAP-LIBCALLS-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s2, a4 +; PURECAP-LIBCALLS-NEXT: .LBB9_5: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, a4 +; PURECAP-LIBCALLS-NEXT: mv a3, a5 +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB9_1 +; PURECAP-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, s2 +; PURECAP-LIBCALLS-NEXT: mv a3, s1 +; PURECAP-LIBCALLS-NEXT: j .LBB9_1 +; PURECAP-LIBCALLS-NEXT: .LBB9_7: # %atomicrmw.end +; PURECAP-LIBCALLS-NEXT: mv a0, a4 +; PURECAP-LIBCALLS-NEXT: mv a1, a5 +; PURECAP-LIBCALLS-NEXT: clc cra, 40(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 48 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_max: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: j .LBB9_2 +; HYBRID-ATOMICS-NEXT: .LBB9_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB9_8: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB9_10 +; HYBRID-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_8 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; 
HYBRID-ATOMICS-NEXT: bnez a6, .LBB9_8 +; HYBRID-ATOMICS-NEXT: .LBB9_10: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB9_7 +; HYBRID-ATOMICS-NEXT: .LBB9_2: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB9_8 Depth 2 +; HYBRID-ATOMICS-NEXT: beq a3, a2, .LBB9_4 +; HYBRID-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: slt a6, a2, a3 +; HYBRID-ATOMICS-NEXT: j .LBB9_5 +; HYBRID-ATOMICS-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a6, a1, a4 +; HYBRID-ATOMICS-NEXT: .LBB9_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a3 +; HYBRID-ATOMICS-NEXT: mv a7, a4 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB9_1 +; HYBRID-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a2 +; HYBRID-ATOMICS-NEXT: mv a7, a1 +; HYBRID-ATOMICS-NEXT: j .LBB9_1 +; HYBRID-ATOMICS-NEXT: .LBB9_7: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_max: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-LIBCALLS-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: lw a5, 4(a0) +; HYBRID-LIBCALLS-NEXT: lw a4, 0(a0) +; HYBRID-LIBCALLS-NEXT: mv s1, a2 +; HYBRID-LIBCALLS-NEXT: mv s2, a1 +; HYBRID-LIBCALLS-NEXT: j .LBB9_2 +; HYBRID-LIBCALLS-NEXT: .LBB9_1: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sw a4, 8(sp) +; HYBRID-LIBCALLS-NEXT: sw a5, 12(sp) +; HYBRID-LIBCALLS-NEXT: addi a1, sp, 8 +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-LIBCALLS-NEXT: lw a5, 12(sp) +; HYBRID-LIBCALLS-NEXT: lw a4, 8(sp) +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB9_7 +; HYBRID-LIBCALLS-NEXT: .LBB9_2: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-LIBCALLS-NEXT: beq a5, s1, .LBB9_4 +; HYBRID-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: slt a0, s1, a5 +; HYBRID-LIBCALLS-NEXT: j .LBB9_5 +; HYBRID-LIBCALLS-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s2, a4 +; HYBRID-LIBCALLS-NEXT: .LBB9_5: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, a4 +; HYBRID-LIBCALLS-NEXT: mv a3, a5 +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB9_1 +; HYBRID-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, s2 +; HYBRID-LIBCALLS-NEXT: mv a3, s1 +; HYBRID-LIBCALLS-NEXT: j .LBB9_1 +; HYBRID-LIBCALLS-NEXT: .LBB9_7: # %atomicrmw.end +; HYBRID-LIBCALLS-NEXT: mv a0, a4 +; HYBRID-LIBCALLS-NEXT: mv a1, a5 +; HYBRID-LIBCALLS-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 
24(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_max: ; HYBRID-CAP-PTR: # %bb.0: @@ -650,22 +1427,33 @@ define i64 @atomic_max(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_max ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) -; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 ; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] ; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp sgt i64 [[LOADED]], [[VAL]] -; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i64 [[LOADED]], ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 -; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VAL]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP3]], i32 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP7]], i32 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = 
zext i32 [[TMP15]] to i64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 32 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i64 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i64, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP21]], 0 ; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] ; PURECAP-IR: atomicrmw.end: ; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] @@ -697,119 +1485,225 @@ define i64 @atomic_max(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_min(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_min: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -48 -; PURECAP-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: cmove cs3, ca0 -; PURECAP-NEXT: clw a5, 4(ca0) -; PURECAP-NEXT: clw a4, 0(ca0) -; PURECAP-NEXT: mv s1, a2 -; PURECAP-NEXT: mv s2, a1 -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds cs0, ca0, 8 -; PURECAP-NEXT: j .LBB10_2 -; PURECAP-NEXT: .LBB10_1: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: csw a4, 0(csp) -; PURECAP-NEXT: csw a5, 4(csp) -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, cs3 -; PURECAP-NEXT: cmove ca1, cs0 -; PURECAP-NEXT: ccall __atomic_compare_exchange_8 -; PURECAP-NEXT: clw a5, 4(csp) -; PURECAP-NEXT: clw a4, 0(csp) -; PURECAP-NEXT: bnez a0, .LBB10_7 -; PURECAP-NEXT: .LBB10_2: # %atomicrmw.start -; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: beq a5, s1, .LBB10_4 -; PURECAP-NEXT: # %bb.3: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: slt a0, s1, a5 -; PURECAP-NEXT: j .LBB10_5 -; PURECAP-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: sltu a0, s2, a4 -; PURECAP-NEXT: .LBB10_5: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: xori a0, a0, 1 -; PURECAP-NEXT: mv a2, a4 -; PURECAP-NEXT: mv a3, a5 -; PURECAP-NEXT: bnez a0, .LBB10_1 -; PURECAP-NEXT: # %bb.6: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: mv a2, s2 -; PURECAP-NEXT: mv a3, s1 -; PURECAP-NEXT: j .LBB10_1 -; PURECAP-NEXT: .LBB10_7: # %atomicrmw.end -; PURECAP-NEXT: mv a0, a4 -; PURECAP-NEXT: mv a1, a5 -; PURECAP-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 48 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_min: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: lw a5, 4(a0) -; HYBRID-NEXT: lw a4, 0(a0) -; HYBRID-NEXT: mv s1, a2 -; HYBRID-NEXT: mv s2, a1 -; HYBRID-NEXT: j .LBB10_2 -; HYBRID-NEXT: 
.LBB10_1: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: sw a4, 8(sp) -; HYBRID-NEXT: sw a5, 12(sp) -; HYBRID-NEXT: addi a1, sp, 8 -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, s0 -; HYBRID-NEXT: call __atomic_compare_exchange_8@plt -; HYBRID-NEXT: lw a5, 12(sp) -; HYBRID-NEXT: lw a4, 8(sp) -; HYBRID-NEXT: bnez a0, .LBB10_7 -; HYBRID-NEXT: .LBB10_2: # %atomicrmw.start -; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: beq a5, s1, .LBB10_4 -; HYBRID-NEXT: # %bb.3: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: slt a0, s1, a5 -; HYBRID-NEXT: j .LBB10_5 -; HYBRID-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: sltu a0, s2, a4 -; HYBRID-NEXT: .LBB10_5: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: xori a0, a0, 1 -; HYBRID-NEXT: mv a2, a4 -; HYBRID-NEXT: mv a3, a5 -; HYBRID-NEXT: bnez a0, .LBB10_1 -; HYBRID-NEXT: # %bb.6: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: mv a2, s2 -; HYBRID-NEXT: mv a3, s1 -; HYBRID-NEXT: j .LBB10_1 -; HYBRID-NEXT: .LBB10_7: # %atomicrmw.end -; HYBRID-NEXT: mv a0, a4 -; HYBRID-NEXT: mv a1, a5 -; HYBRID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_min: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: j .LBB10_2 +; PURECAP-ATOMICS-NEXT: .LBB10_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB10_8: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB10_10 +; PURECAP-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_8 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB10_8 +; PURECAP-ATOMICS-NEXT: .LBB10_10: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB10_7 +; PURECAP-ATOMICS-NEXT: .LBB10_2: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB10_8 Depth 2 +; PURECAP-ATOMICS-NEXT: beq a3, a2, .LBB10_4 +; PURECAP-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: slt a5, a2, a3 +; PURECAP-ATOMICS-NEXT: j .LBB10_5 +; PURECAP-ATOMICS-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a5, a1, a4 +; PURECAP-ATOMICS-NEXT: .LBB10_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: xori a6, a5, 1 +; PURECAP-ATOMICS-NEXT: mv a5, a3 +; PURECAP-ATOMICS-NEXT: mv a7, a4 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB10_1 
+; PURECAP-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a2 +; PURECAP-ATOMICS-NEXT: mv a7, a1 +; PURECAP-ATOMICS-NEXT: j .LBB10_1 +; PURECAP-ATOMICS-NEXT: .LBB10_7: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_min: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -48 +; PURECAP-LIBCALLS-NEXT: csc cra, 40(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs3, ca0 +; PURECAP-LIBCALLS-NEXT: clw a5, 4(ca0) +; PURECAP-LIBCALLS-NEXT: clw a4, 0(ca0) +; PURECAP-LIBCALLS-NEXT: mv s1, a2 +; PURECAP-LIBCALLS-NEXT: mv s2, a1 +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds cs0, ca0, 8 +; PURECAP-LIBCALLS-NEXT: j .LBB10_2 +; PURECAP-LIBCALLS-NEXT: .LBB10_1: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: csw a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: csw a5, 4(csp) +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs3 +; PURECAP-LIBCALLS-NEXT: cmove ca1, cs0 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-LIBCALLS-NEXT: clw a5, 4(csp) +; PURECAP-LIBCALLS-NEXT: clw a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB10_7 +; PURECAP-LIBCALLS-NEXT: .LBB10_2: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-LIBCALLS-NEXT: beq a5, s1, .LBB10_4 +; PURECAP-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: slt a0, s1, a5 +; PURECAP-LIBCALLS-NEXT: j .LBB10_5 +; PURECAP-LIBCALLS-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s2, a4 +; PURECAP-LIBCALLS-NEXT: .LBB10_5: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: xori a0, a0, 1 +; PURECAP-LIBCALLS-NEXT: mv a2, a4 +; PURECAP-LIBCALLS-NEXT: mv a3, a5 +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB10_1 +; PURECAP-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, s2 +; PURECAP-LIBCALLS-NEXT: mv a3, s1 +; PURECAP-LIBCALLS-NEXT: j .LBB10_1 +; PURECAP-LIBCALLS-NEXT: .LBB10_7: # %atomicrmw.end +; PURECAP-LIBCALLS-NEXT: mv a0, a4 +; PURECAP-LIBCALLS-NEXT: mv a1, a5 +; PURECAP-LIBCALLS-NEXT: clc cra, 40(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 48 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_min: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: j .LBB10_2 +; HYBRID-ATOMICS-NEXT: .LBB10_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, 
a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB10_8: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB10_10 +; HYBRID-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_8 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB10_8 +; HYBRID-ATOMICS-NEXT: .LBB10_10: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB10_7 +; HYBRID-ATOMICS-NEXT: .LBB10_2: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB10_8 Depth 2 +; HYBRID-ATOMICS-NEXT: beq a3, a2, .LBB10_4 +; HYBRID-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: slt a5, a2, a3 +; HYBRID-ATOMICS-NEXT: j .LBB10_5 +; HYBRID-ATOMICS-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a5, a1, a4 +; HYBRID-ATOMICS-NEXT: .LBB10_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: xori a6, a5, 1 +; HYBRID-ATOMICS-NEXT: mv a5, a3 +; HYBRID-ATOMICS-NEXT: mv a7, a4 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB10_1 +; HYBRID-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a2 +; HYBRID-ATOMICS-NEXT: mv a7, a1 +; HYBRID-ATOMICS-NEXT: j .LBB10_1 +; HYBRID-ATOMICS-NEXT: .LBB10_7: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_min: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-LIBCALLS-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: lw a5, 4(a0) +; HYBRID-LIBCALLS-NEXT: lw a4, 0(a0) +; HYBRID-LIBCALLS-NEXT: mv s1, a2 +; HYBRID-LIBCALLS-NEXT: mv s2, a1 +; HYBRID-LIBCALLS-NEXT: j .LBB10_2 +; HYBRID-LIBCALLS-NEXT: .LBB10_1: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sw a4, 8(sp) +; HYBRID-LIBCALLS-NEXT: sw a5, 12(sp) +; HYBRID-LIBCALLS-NEXT: addi a1, sp, 8 +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-LIBCALLS-NEXT: lw a5, 12(sp) +; HYBRID-LIBCALLS-NEXT: lw a4, 8(sp) +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB10_7 +; HYBRID-LIBCALLS-NEXT: .LBB10_2: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-LIBCALLS-NEXT: beq a5, s1, .LBB10_4 +; HYBRID-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: slt a0, s1, a5 +; HYBRID-LIBCALLS-NEXT: j .LBB10_5 +; HYBRID-LIBCALLS-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s2, a4 +; HYBRID-LIBCALLS-NEXT: 
.LBB10_5: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: xori a0, a0, 1 +; HYBRID-LIBCALLS-NEXT: mv a2, a4 +; HYBRID-LIBCALLS-NEXT: mv a3, a5 +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB10_1 +; HYBRID-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, s2 +; HYBRID-LIBCALLS-NEXT: mv a3, s1 +; HYBRID-LIBCALLS-NEXT: j .LBB10_1 +; HYBRID-LIBCALLS-NEXT: .LBB10_7: # %atomicrmw.end +; HYBRID-LIBCALLS-NEXT: mv a0, a4 +; HYBRID-LIBCALLS-NEXT: mv a1, a5 +; HYBRID-LIBCALLS-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_min: ; HYBRID-CAP-PTR: # %bb.0: @@ -866,22 +1760,33 @@ define i64 @atomic_min(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_min ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) -; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 ; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] ; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp sle i64 [[LOADED]], [[VAL]] -; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i64 [[LOADED]], ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 -; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VAL]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP3]], i32 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) 
[[TMP7]], i32 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i32 [[TMP15]] to i64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 32 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i64 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i64, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP21]], 0 ; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] ; PURECAP-IR: atomicrmw.end: ; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] @@ -913,117 +1818,221 @@ define i64 @atomic_min(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_umax(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_umax: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -48 -; PURECAP-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: cmove cs3, ca0 -; PURECAP-NEXT: clw a5, 4(ca0) -; PURECAP-NEXT: clw a4, 0(ca0) -; PURECAP-NEXT: mv s1, a2 -; PURECAP-NEXT: mv s2, a1 -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds cs0, ca0, 8 -; PURECAP-NEXT: j .LBB11_2 -; PURECAP-NEXT: .LBB11_1: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: csw a4, 0(csp) -; PURECAP-NEXT: csw a5, 4(csp) -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, cs3 -; PURECAP-NEXT: cmove ca1, cs0 -; PURECAP-NEXT: ccall __atomic_compare_exchange_8 -; PURECAP-NEXT: clw a5, 4(csp) -; PURECAP-NEXT: clw a4, 0(csp) -; PURECAP-NEXT: bnez a0, .LBB11_7 -; PURECAP-NEXT: .LBB11_2: # %atomicrmw.start -; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: beq a5, s1, .LBB11_4 -; PURECAP-NEXT: # %bb.3: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: sltu a0, s1, a5 -; PURECAP-NEXT: j .LBB11_5 -; PURECAP-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: sltu a0, s2, a4 -; PURECAP-NEXT: .LBB11_5: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: mv a2, a4 -; PURECAP-NEXT: mv a3, a5 -; PURECAP-NEXT: bnez a0, .LBB11_1 -; PURECAP-NEXT: # %bb.6: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: mv a2, s2 -; PURECAP-NEXT: mv a3, s1 -; PURECAP-NEXT: j .LBB11_1 -; PURECAP-NEXT: .LBB11_7: # %atomicrmw.end -; PURECAP-NEXT: mv a0, a4 -; PURECAP-NEXT: mv a1, a5 -; PURECAP-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; 
PURECAP-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 48 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_umax: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: lw a5, 4(a0) -; HYBRID-NEXT: lw a4, 0(a0) -; HYBRID-NEXT: mv s1, a2 -; HYBRID-NEXT: mv s2, a1 -; HYBRID-NEXT: j .LBB11_2 -; HYBRID-NEXT: .LBB11_1: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: sw a4, 8(sp) -; HYBRID-NEXT: sw a5, 12(sp) -; HYBRID-NEXT: addi a1, sp, 8 -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, s0 -; HYBRID-NEXT: call __atomic_compare_exchange_8@plt -; HYBRID-NEXT: lw a5, 12(sp) -; HYBRID-NEXT: lw a4, 8(sp) -; HYBRID-NEXT: bnez a0, .LBB11_7 -; HYBRID-NEXT: .LBB11_2: # %atomicrmw.start -; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: beq a5, s1, .LBB11_4 -; HYBRID-NEXT: # %bb.3: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: sltu a0, s1, a5 -; HYBRID-NEXT: j .LBB11_5 -; HYBRID-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: sltu a0, s2, a4 -; HYBRID-NEXT: .LBB11_5: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: mv a2, a4 -; HYBRID-NEXT: mv a3, a5 -; HYBRID-NEXT: bnez a0, .LBB11_1 -; HYBRID-NEXT: # %bb.6: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: mv a2, s2 -; HYBRID-NEXT: mv a3, s1 -; HYBRID-NEXT: j .LBB11_1 -; HYBRID-NEXT: .LBB11_7: # %atomicrmw.end -; HYBRID-NEXT: mv a0, a4 -; HYBRID-NEXT: mv a1, a5 -; HYBRID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_umax: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: j .LBB11_2 +; PURECAP-ATOMICS-NEXT: .LBB11_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB11_8: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB11_10 +; PURECAP-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_8 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB11_8 +; PURECAP-ATOMICS-NEXT: .LBB11_10: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB11_7 +; PURECAP-ATOMICS-NEXT: .LBB11_2: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop 
BB11_8 Depth 2 +; PURECAP-ATOMICS-NEXT: beq a3, a2, .LBB11_4 +; PURECAP-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a6, a2, a3 +; PURECAP-ATOMICS-NEXT: j .LBB11_5 +; PURECAP-ATOMICS-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a6, a1, a4 +; PURECAP-ATOMICS-NEXT: .LBB11_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a3 +; PURECAP-ATOMICS-NEXT: mv a7, a4 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB11_1 +; PURECAP-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a2 +; PURECAP-ATOMICS-NEXT: mv a7, a1 +; PURECAP-ATOMICS-NEXT: j .LBB11_1 +; PURECAP-ATOMICS-NEXT: .LBB11_7: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_umax: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -48 +; PURECAP-LIBCALLS-NEXT: csc cra, 40(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs3, ca0 +; PURECAP-LIBCALLS-NEXT: clw a5, 4(ca0) +; PURECAP-LIBCALLS-NEXT: clw a4, 0(ca0) +; PURECAP-LIBCALLS-NEXT: mv s1, a2 +; PURECAP-LIBCALLS-NEXT: mv s2, a1 +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds cs0, ca0, 8 +; PURECAP-LIBCALLS-NEXT: j .LBB11_2 +; PURECAP-LIBCALLS-NEXT: .LBB11_1: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: csw a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: csw a5, 4(csp) +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs3 +; PURECAP-LIBCALLS-NEXT: cmove ca1, cs0 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-LIBCALLS-NEXT: clw a5, 4(csp) +; PURECAP-LIBCALLS-NEXT: clw a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB11_7 +; PURECAP-LIBCALLS-NEXT: .LBB11_2: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-LIBCALLS-NEXT: beq a5, s1, .LBB11_4 +; PURECAP-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s1, a5 +; PURECAP-LIBCALLS-NEXT: j .LBB11_5 +; PURECAP-LIBCALLS-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s2, a4 +; PURECAP-LIBCALLS-NEXT: .LBB11_5: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, a4 +; PURECAP-LIBCALLS-NEXT: mv a3, a5 +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB11_1 +; PURECAP-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, s2 +; PURECAP-LIBCALLS-NEXT: mv a3, s1 +; PURECAP-LIBCALLS-NEXT: j .LBB11_1 +; PURECAP-LIBCALLS-NEXT: .LBB11_7: # %atomicrmw.end +; PURECAP-LIBCALLS-NEXT: mv a0, a4 +; PURECAP-LIBCALLS-NEXT: mv a1, a5 +; PURECAP-LIBCALLS-NEXT: clc cra, 40(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs2, 16(csp) # 
8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 48 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_umax: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: j .LBB11_2 +; HYBRID-ATOMICS-NEXT: .LBB11_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB11_8: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB11_10 +; HYBRID-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_8 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB11_8 +; HYBRID-ATOMICS-NEXT: .LBB11_10: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB11_7 +; HYBRID-ATOMICS-NEXT: .LBB11_2: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB11_8 Depth 2 +; HYBRID-ATOMICS-NEXT: beq a3, a2, .LBB11_4 +; HYBRID-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a6, a2, a3 +; HYBRID-ATOMICS-NEXT: j .LBB11_5 +; HYBRID-ATOMICS-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a6, a1, a4 +; HYBRID-ATOMICS-NEXT: .LBB11_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a3 +; HYBRID-ATOMICS-NEXT: mv a7, a4 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB11_1 +; HYBRID-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a2 +; HYBRID-ATOMICS-NEXT: mv a7, a1 +; HYBRID-ATOMICS-NEXT: j .LBB11_1 +; HYBRID-ATOMICS-NEXT: .LBB11_7: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_umax: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-LIBCALLS-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: lw a5, 4(a0) +; HYBRID-LIBCALLS-NEXT: lw a4, 0(a0) +; HYBRID-LIBCALLS-NEXT: mv s1, a2 +; HYBRID-LIBCALLS-NEXT: mv s2, a1 +; HYBRID-LIBCALLS-NEXT: j .LBB11_2 +; HYBRID-LIBCALLS-NEXT: .LBB11_1: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sw a4, 8(sp) +; HYBRID-LIBCALLS-NEXT: sw a5, 12(sp) +; HYBRID-LIBCALLS-NEXT: addi a1, sp, 8 +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-LIBCALLS-NEXT: lw a5, 12(sp) +; HYBRID-LIBCALLS-NEXT: lw a4, 8(sp) +; 
HYBRID-LIBCALLS-NEXT: bnez a0, .LBB11_7 +; HYBRID-LIBCALLS-NEXT: .LBB11_2: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-LIBCALLS-NEXT: beq a5, s1, .LBB11_4 +; HYBRID-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s1, a5 +; HYBRID-LIBCALLS-NEXT: j .LBB11_5 +; HYBRID-LIBCALLS-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s2, a4 +; HYBRID-LIBCALLS-NEXT: .LBB11_5: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, a4 +; HYBRID-LIBCALLS-NEXT: mv a3, a5 +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB11_1 +; HYBRID-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, s2 +; HYBRID-LIBCALLS-NEXT: mv a3, s1 +; HYBRID-LIBCALLS-NEXT: j .LBB11_1 +; HYBRID-LIBCALLS-NEXT: .LBB11_7: # %atomicrmw.end +; HYBRID-LIBCALLS-NEXT: mv a0, a4 +; HYBRID-LIBCALLS-NEXT: mv a1, a5 +; HYBRID-LIBCALLS-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_umax: ; HYBRID-CAP-PTR: # %bb.0: @@ -1079,22 +2088,33 @@ define i64 @atomic_umax(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umax ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) -; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 ; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] ; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[LOADED]], [[VAL]] -; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i64 [[LOADED]], ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP5]], 0 -; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VAL]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: 
[[TMP5:%.*]] = trunc i64 [[TMP4]] to i32 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP3]], i32 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP7]], i32 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i32 [[TMP15]] to i64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 32 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i64 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i64, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP21]], 0 ; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] ; PURECAP-IR: atomicrmw.end: ; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] @@ -1126,119 +2146,225 @@ define i64 @atomic_umax(ptr addrspace(200) %ptr, i64 %val) nounwind { } define i64 @atomic_umin(ptr addrspace(200) %ptr, i64 %val) nounwind { -; PURECAP-LABEL: atomic_umin: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -48 -; PURECAP-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; PURECAP-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; PURECAP-NEXT: cmove cs3, ca0 -; PURECAP-NEXT: clw a5, 4(ca0) -; PURECAP-NEXT: clw a4, 0(ca0) -; PURECAP-NEXT: mv s1, a2 -; PURECAP-NEXT: mv s2, a1 -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds cs0, ca0, 8 -; PURECAP-NEXT: j .LBB12_2 -; PURECAP-NEXT: .LBB12_1: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: csw a4, 0(csp) -; PURECAP-NEXT: csw a5, 4(csp) -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, cs3 -; PURECAP-NEXT: cmove ca1, cs0 -; PURECAP-NEXT: ccall __atomic_compare_exchange_8 -; PURECAP-NEXT: clw a5, 4(csp) -; PURECAP-NEXT: clw a4, 0(csp) -; PURECAP-NEXT: bnez a0, .LBB12_7 -; PURECAP-NEXT: .LBB12_2: # %atomicrmw.start -; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: beq a5, s1, .LBB12_4 -; PURECAP-NEXT: # %bb.3: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: sltu a0, s1, a5 -; PURECAP-NEXT: j .LBB12_5 -; PURECAP-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: sltu a0, s2, a4 -; PURECAP-NEXT: .LBB12_5: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: xori a0, a0, 1 -; PURECAP-NEXT: mv a2, 
a4 -; PURECAP-NEXT: mv a3, a5 -; PURECAP-NEXT: bnez a0, .LBB12_1 -; PURECAP-NEXT: # %bb.6: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: mv a2, s2 -; PURECAP-NEXT: mv a3, s1 -; PURECAP-NEXT: j .LBB12_1 -; PURECAP-NEXT: .LBB12_7: # %atomicrmw.end -; PURECAP-NEXT: mv a0, a4 -; PURECAP-NEXT: mv a1, a5 -; PURECAP-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; PURECAP-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 48 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_umin: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -32 -; HYBRID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill -; HYBRID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: lw a5, 4(a0) -; HYBRID-NEXT: lw a4, 0(a0) -; HYBRID-NEXT: mv s1, a2 -; HYBRID-NEXT: mv s2, a1 -; HYBRID-NEXT: j .LBB12_2 -; HYBRID-NEXT: .LBB12_1: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: sw a4, 8(sp) -; HYBRID-NEXT: sw a5, 12(sp) -; HYBRID-NEXT: addi a1, sp, 8 -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, s0 -; HYBRID-NEXT: call __atomic_compare_exchange_8@plt -; HYBRID-NEXT: lw a5, 12(sp) -; HYBRID-NEXT: lw a4, 8(sp) -; HYBRID-NEXT: bnez a0, .LBB12_7 -; HYBRID-NEXT: .LBB12_2: # %atomicrmw.start -; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: beq a5, s1, .LBB12_4 -; HYBRID-NEXT: # %bb.3: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: sltu a0, s1, a5 -; HYBRID-NEXT: j .LBB12_5 -; HYBRID-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: sltu a0, s2, a4 -; HYBRID-NEXT: .LBB12_5: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: xori a0, a0, 1 -; HYBRID-NEXT: mv a2, a4 -; HYBRID-NEXT: mv a3, a5 -; HYBRID-NEXT: bnez a0, .LBB12_1 -; HYBRID-NEXT: # %bb.6: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: mv a2, s2 -; HYBRID-NEXT: mv a3, s1 -; HYBRID-NEXT: j .LBB12_1 -; HYBRID-NEXT: .LBB12_7: # %atomicrmw.end -; HYBRID-NEXT: mv a0, a4 -; HYBRID-NEXT: mv a1, a5 -; HYBRID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload -; HYBRID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 32 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_umin: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) +; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: j .LBB12_2 +; PURECAP-ATOMICS-NEXT: .LBB12_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB12_8: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB12_10 +; PURECAP-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: 
Header=BB12_8 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB12_8 +; PURECAP-ATOMICS-NEXT: .LBB12_10: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB12_7 +; PURECAP-ATOMICS-NEXT: .LBB12_2: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB12_8 Depth 2 +; PURECAP-ATOMICS-NEXT: beq a3, a2, .LBB12_4 +; PURECAP-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a5, a2, a3 +; PURECAP-ATOMICS-NEXT: j .LBB12_5 +; PURECAP-ATOMICS-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a5, a1, a4 +; PURECAP-ATOMICS-NEXT: .LBB12_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: xori a6, a5, 1 +; PURECAP-ATOMICS-NEXT: mv a5, a3 +; PURECAP-ATOMICS-NEXT: mv a7, a4 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB12_1 +; PURECAP-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a2 +; PURECAP-ATOMICS-NEXT: mv a7, a1 +; PURECAP-ATOMICS-NEXT: j .LBB12_1 +; PURECAP-ATOMICS-NEXT: .LBB12_7: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_umin: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -48 +; PURECAP-LIBCALLS-NEXT: csc cra, 40(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs3, ca0 +; PURECAP-LIBCALLS-NEXT: clw a5, 4(ca0) +; PURECAP-LIBCALLS-NEXT: clw a4, 0(ca0) +; PURECAP-LIBCALLS-NEXT: mv s1, a2 +; PURECAP-LIBCALLS-NEXT: mv s2, a1 +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds cs0, ca0, 8 +; PURECAP-LIBCALLS-NEXT: j .LBB12_2 +; PURECAP-LIBCALLS-NEXT: .LBB12_1: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: csw a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: csw a5, 4(csp) +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs3 +; PURECAP-LIBCALLS-NEXT: cmove ca1, cs0 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_8 +; PURECAP-LIBCALLS-NEXT: clw a5, 4(csp) +; PURECAP-LIBCALLS-NEXT: clw a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB12_7 +; PURECAP-LIBCALLS-NEXT: .LBB12_2: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-LIBCALLS-NEXT: beq a5, s1, .LBB12_4 +; PURECAP-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s1, a5 +; PURECAP-LIBCALLS-NEXT: j .LBB12_5 +; PURECAP-LIBCALLS-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s2, a4 +; PURECAP-LIBCALLS-NEXT: .LBB12_5: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: xori a0, a0, 1 +; PURECAP-LIBCALLS-NEXT: mv a2, a4 +; PURECAP-LIBCALLS-NEXT: mv a3, a5 +; 
PURECAP-LIBCALLS-NEXT: bnez a0, .LBB12_1 +; PURECAP-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, s2 +; PURECAP-LIBCALLS-NEXT: mv a3, s1 +; PURECAP-LIBCALLS-NEXT: j .LBB12_1 +; PURECAP-LIBCALLS-NEXT: .LBB12_7: # %atomicrmw.end +; PURECAP-LIBCALLS-NEXT: mv a0, a4 +; PURECAP-LIBCALLS-NEXT: mv a1, a5 +; PURECAP-LIBCALLS-NEXT: clc cra, 40(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 48 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_umin: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) +; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) +; HYBRID-ATOMICS-NEXT: j .LBB12_2 +; HYBRID-ATOMICS-NEXT: .LBB12_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB12_8: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB12_10 +; HYBRID-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_8 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB12_8 +; HYBRID-ATOMICS-NEXT: .LBB12_10: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB12_7 +; HYBRID-ATOMICS-NEXT: .LBB12_2: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB12_8 Depth 2 +; HYBRID-ATOMICS-NEXT: beq a3, a2, .LBB12_4 +; HYBRID-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a5, a2, a3 +; HYBRID-ATOMICS-NEXT: j .LBB12_5 +; HYBRID-ATOMICS-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a5, a1, a4 +; HYBRID-ATOMICS-NEXT: .LBB12_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: xori a6, a5, 1 +; HYBRID-ATOMICS-NEXT: mv a5, a3 +; HYBRID-ATOMICS-NEXT: mv a7, a4 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB12_1 +; HYBRID-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a2 +; HYBRID-ATOMICS-NEXT: mv a7, a1 +; HYBRID-ATOMICS-NEXT: j .LBB12_1 +; HYBRID-ATOMICS-NEXT: .LBB12_7: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_umin: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-LIBCALLS-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; 
HYBRID-LIBCALLS-NEXT: lw a5, 4(a0) +; HYBRID-LIBCALLS-NEXT: lw a4, 0(a0) +; HYBRID-LIBCALLS-NEXT: mv s1, a2 +; HYBRID-LIBCALLS-NEXT: mv s2, a1 +; HYBRID-LIBCALLS-NEXT: j .LBB12_2 +; HYBRID-LIBCALLS-NEXT: .LBB12_1: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sw a4, 8(sp) +; HYBRID-LIBCALLS-NEXT: sw a5, 12(sp) +; HYBRID-LIBCALLS-NEXT: addi a1, sp, 8 +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_8@plt +; HYBRID-LIBCALLS-NEXT: lw a5, 12(sp) +; HYBRID-LIBCALLS-NEXT: lw a4, 8(sp) +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB12_7 +; HYBRID-LIBCALLS-NEXT: .LBB12_2: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-LIBCALLS-NEXT: beq a5, s1, .LBB12_4 +; HYBRID-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s1, a5 +; HYBRID-LIBCALLS-NEXT: j .LBB12_5 +; HYBRID-LIBCALLS-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s2, a4 +; HYBRID-LIBCALLS-NEXT: .LBB12_5: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: xori a0, a0, 1 +; HYBRID-LIBCALLS-NEXT: mv a2, a4 +; HYBRID-LIBCALLS-NEXT: mv a3, a5 +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB12_1 +; HYBRID-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, s2 +; HYBRID-LIBCALLS-NEXT: mv a3, s1 +; HYBRID-LIBCALLS-NEXT: j .LBB12_1 +; HYBRID-LIBCALLS-NEXT: .LBB12_7: # %atomicrmw.end +; HYBRID-LIBCALLS-NEXT: mv a0, a4 +; HYBRID-LIBCALLS-NEXT: mv a1, a5 +; HYBRID-LIBCALLS-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_umin: ; HYBRID-CAP-PTR: # %bb.0: @@ -1295,22 +2421,33 @@ define i64 @atomic_umin(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umin ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i64, align 8, addrspace(200) -; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 ; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] ; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp ule i64 [[LOADED]], [[VAL]] -; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 [[LOADED]], i64 [[VAL]] -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i64 [[LOADED]], ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i64 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i64, ptr addrspace(200) [[TMP1]], align 8 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 8, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i64, i1 } 
undef, i64 [[TMP5]], 0 -; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i64, i1 } [[TMP6]], i1 [[TMP4]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VAL]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i64 [[LOADED]], 32 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP3]], i32 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i64 [[NEW]], 32 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP7]], i32 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i32 [[TMP15]] to i64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 32 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i64 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i64, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP21]], 0 ; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] ; PURECAP-IR: atomicrmw.end: ; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll index 80416d25a1bc..e7e4f29a53c6 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll @@ -176,25 +176,89 @@ define i128 @load(ptr addrspace(200) %ptr) nounwind { } define i128 @atomic_xchg(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_xchg: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_exchange_16 -; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_xchg: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_exchange_16@plt -; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 
-; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_xchg: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB2_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB2_3 Depth 2 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a1 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a2 +; PURECAP-ATOMICS-NEXT: .LBB2_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB2_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB2_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB2_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB2_3 +; PURECAP-ATOMICS-NEXT: .LBB2_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB2_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB2_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_xchg: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_exchange_16 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_xchg: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB2_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB2_3 Depth 2 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a1 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a2 +; HYBRID-ATOMICS-NEXT: .LBB2_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB2_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB2_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB2_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB2_3 +; HYBRID-ATOMICS-NEXT: .LBB2_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB2_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB2_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_xchg: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call 
__atomic_exchange_16@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_xchg: ; HYBRID-CAP-PTR: # %bb.0: @@ -207,8 +271,34 @@ define i128 @atomic_xchg(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xchg ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_exchange_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP2]], i64 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[VAL]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i128 [[VAL]], 64 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i128 [[TMP7]] to i64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP6]], i64 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i128 [[TMP16]], 64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i128 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xchg ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { @@ -220,25 +310,97 @@ define i128 @atomic_xchg(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_add(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_add: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_add_16 -; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; 
HYBRID-LABEL: atomic_add: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_add_16@plt -; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_add: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB3_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB3_3 Depth 2 +; PURECAP-ATOMICS-NEXT: add a5, a4, a1 +; PURECAP-ATOMICS-NEXT: sltu a6, a5, a4 +; PURECAP-ATOMICS-NEXT: add a7, a3, a2 +; PURECAP-ATOMICS-NEXT: add a6, a7, a6 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a5 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a6 +; PURECAP-ATOMICS-NEXT: .LBB3_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB3_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB3_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB3_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB3_3 +; PURECAP-ATOMICS-NEXT: .LBB3_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB3_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB3_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_add: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_add_16 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_add: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB3_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB3_3 Depth 2 +; HYBRID-ATOMICS-NEXT: add a5, a4, a1 +; HYBRID-ATOMICS-NEXT: sltu a6, a5, a4 +; HYBRID-ATOMICS-NEXT: add a7, a3, a2 +; HYBRID-ATOMICS-NEXT: add a6, a7, a6 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a5 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a6 +; HYBRID-ATOMICS-NEXT: .LBB3_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB3_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB3_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB3_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB3_3 +; HYBRID-ATOMICS-NEXT: .LBB3_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: 
Header=BB3_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB3_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_add: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_add_16@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_add: ; HYBRID-CAP-PTR: # %bb.0: @@ -251,8 +413,35 @@ define i128 @atomic_add(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_add ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_add_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = add i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP2]], i64 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i128 [[TMP7]] to i64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP6]], i64 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i128 [[TMP16]], 64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i128 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: 
define {{[^@]+}}@atomic_add ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { @@ -264,25 +453,97 @@ define i128 @atomic_add(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_sub(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_sub: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_sub_16 -; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_sub: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_sub_16@plt -; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_sub: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB4_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB4_3 Depth 2 +; PURECAP-ATOMICS-NEXT: sltu a5, a4, a1 +; PURECAP-ATOMICS-NEXT: sub a6, a3, a2 +; PURECAP-ATOMICS-NEXT: sub a5, a6, a5 +; PURECAP-ATOMICS-NEXT: sub a6, a4, a1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB4_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB4_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB4_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB4_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB4_3 +; PURECAP-ATOMICS-NEXT: .LBB4_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB4_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB4_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_sub: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_sub_16 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_sub: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB4_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB4_3 Depth 2 +; HYBRID-ATOMICS-NEXT: sltu a5, a4, a1 +; HYBRID-ATOMICS-NEXT: sub a6, a3, a2 +; HYBRID-ATOMICS-NEXT: sub a5, a6, a5 +; HYBRID-ATOMICS-NEXT: sub a6, a4, a1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; 
HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB4_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB4_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB4_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB4_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB4_3 +; HYBRID-ATOMICS-NEXT: .LBB4_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB4_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB4_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_sub: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_sub_16@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_sub: ; HYBRID-CAP-PTR: # %bb.0: @@ -295,8 +556,35 @@ define i128 @atomic_sub(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_sub ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_sub_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = sub i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP2]], i64 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i128 [[TMP7]] to i64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP6]], i64 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; 
PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i128 [[TMP16]], 64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i128 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_sub ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { @@ -308,25 +596,93 @@ define i128 @atomic_sub(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_and(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_and: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_and_16 -; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_and: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_and_16@plt -; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_and: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB5_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB5_3 Depth 2 +; PURECAP-ATOMICS-NEXT: and a5, a3, a2 +; PURECAP-ATOMICS-NEXT: and a6, a4, a1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB5_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB5_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB5_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB5_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB5_3 +; PURECAP-ATOMICS-NEXT: .LBB5_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB5_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB5_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_and: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_and_16 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_and: +; 
HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB5_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB5_3 Depth 2 +; HYBRID-ATOMICS-NEXT: and a5, a3, a2 +; HYBRID-ATOMICS-NEXT: and a6, a4, a1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB5_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB5_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB5_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB5_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB5_3 +; HYBRID-ATOMICS-NEXT: .LBB5_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB5_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB5_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_and: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_and_16@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_and: ; HYBRID-CAP-PTR: # %bb.0: @@ -339,8 +695,35 @@ define i128 @atomic_and(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_and ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_and_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = and i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP2]], i64 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i128 [[TMP7]] to i64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP6]], i64 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { 
ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i128 [[TMP16]], 64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i128 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_and ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { @@ -352,25 +735,97 @@ define i128 @atomic_and(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_nand(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_nand: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_nand_16 -; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_nand: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_nand_16@plt -; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_nand: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB6_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB6_3 Depth 2 +; PURECAP-ATOMICS-NEXT: and a5, a4, a1 +; PURECAP-ATOMICS-NEXT: and a6, a3, a2 +; PURECAP-ATOMICS-NEXT: not a6, a6 +; PURECAP-ATOMICS-NEXT: not a5, a5 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a5 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a6 +; PURECAP-ATOMICS-NEXT: .LBB6_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB6_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB6_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB6_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB6_3 +; PURECAP-ATOMICS-NEXT: .LBB6_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB6_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB6_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 
+; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_nand: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_nand_16 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_nand: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB6_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB6_3 Depth 2 +; HYBRID-ATOMICS-NEXT: and a5, a4, a1 +; HYBRID-ATOMICS-NEXT: and a6, a3, a2 +; HYBRID-ATOMICS-NEXT: not a6, a6 +; HYBRID-ATOMICS-NEXT: not a5, a5 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a5 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a6 +; HYBRID-ATOMICS-NEXT: .LBB6_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB6_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB6_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB6_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB6_3 +; HYBRID-ATOMICS-NEXT: .LBB6_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB6_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB6_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_nand: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_nand_16@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_nand: ; HYBRID-CAP-PTR: # %bb.0: @@ -383,8 +838,36 @@ define i128 @atomic_nand(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_nand ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_nand_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = and i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = xor i128 [[TMP2]], -1 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 +; 
PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP3]], i64 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i128 [[TMP8]] to i64 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP7]], i64 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i64 [[TMP15]] to i128 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i128 [[TMP17]], 64 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i128 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i128, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP21]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_nand ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { @@ -396,25 +879,93 @@ define i128 @atomic_nand(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_or(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_or: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_or_16 -; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_or: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_or_16@plt -; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_or: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB7_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB7_3 Depth 2 +; PURECAP-ATOMICS-NEXT: or a5, a3, a2 +; PURECAP-ATOMICS-NEXT: or a6, a4, a1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB7_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB7_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; 
PURECAP-ATOMICS-NEXT: beqz a6, .LBB7_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB7_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB7_3 +; PURECAP-ATOMICS-NEXT: .LBB7_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB7_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB7_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_or: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_or_16 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_or: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB7_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB7_3 Depth 2 +; HYBRID-ATOMICS-NEXT: or a5, a3, a2 +; HYBRID-ATOMICS-NEXT: or a6, a4, a1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB7_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB7_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB7_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB7_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB7_3 +; HYBRID-ATOMICS-NEXT: .LBB7_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB7_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB7_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_or: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_or_16@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_or: ; HYBRID-CAP-PTR: # %bb.0: @@ -427,8 +978,35 @@ define i128 @atomic_or(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_or ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_or_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label 
[[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = or i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP2]], i64 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i128 [[TMP7]] to i64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP6]], i64 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i128 [[TMP16]], 64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i128 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_or ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { @@ -440,25 +1018,93 @@ define i128 @atomic_or(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_xor(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_xor: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -16 -; PURECAP-NEXT: csc cra, 0(csp) # 16-byte Folded Spill -; PURECAP-NEXT: li a3, 5 -; PURECAP-NEXT: ccall __atomic_fetch_xor_16 -; PURECAP-NEXT: clc cra, 0(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 16 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_xor: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -16 -; HYBRID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-NEXT: li a3, 5 -; HYBRID-NEXT: call __atomic_fetch_xor_16@plt -; HYBRID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 16 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_xor: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: .LBB8_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB8_3 Depth 2 +; PURECAP-ATOMICS-NEXT: xor a5, a3, a2 +; PURECAP-ATOMICS-NEXT: xor a6, a4, a1 +; 
PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB8_3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB8_1 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB8_5 +; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB8_3 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB8_3 +; PURECAP-ATOMICS-NEXT: .LBB8_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB8_1 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB8_1 +; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_xor: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a3, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_fetch_xor_16 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_xor: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: .LBB8_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB8_3 Depth 2 +; HYBRID-ATOMICS-NEXT: xor a5, a3, a2 +; HYBRID-ATOMICS-NEXT: xor a6, a4, a1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a6 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB8_3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB8_1 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB8_5 +; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB8_3 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB8_3 +; HYBRID-ATOMICS-NEXT: .LBB8_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB8_1 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB8_1 +; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_xor: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a3, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_fetch_xor_16@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_xor: ; HYBRID-CAP-PTR: # %bb.0: @@ -471,8 +1117,35 @@ define i128 @atomic_xor(ptr addrspace(200) %ptr, i128 
%val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xor ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_fetch_xor_16(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) -; PURECAP-IR-NEXT: ret i128 [[TMP1]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] +; PURECAP-IR: atomicrmw.start: +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[NEW:%.*]] = xor i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP2]], i64 [[TMP4]]) +; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i128 [[TMP7]] to i64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP6]], i64 [[TMP8]]) +; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP11]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i128 [[TMP16]], 64 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i128 [[TMP15]], [[TMP17]] +; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP18]], 0 +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } [[TMP19]], i1 [[TMP12]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP20]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP20]], 0 +; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] +; PURECAP-IR: atomicrmw.end: +; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xor ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { @@ -484,117 +1157,221 @@ define i128 @atomic_xor(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_max(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_max: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -96 -; PURECAP-NEXT: csc cra, 80(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill -; PURECAP-NEXT: cmove cs3, ca0 -; PURECAP-NEXT: cld a5, 8(ca0) -; PURECAP-NEXT: cld a4, 0(ca0) -; PURECAP-NEXT: mv s1, a2 -; PURECAP-NEXT: mv s2, a1 -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; 
PURECAP-NEXT: csetbounds cs0, ca0, 16 -; PURECAP-NEXT: j .LBB9_2 -; PURECAP-NEXT: .LBB9_1: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: csd a4, 0(csp) -; PURECAP-NEXT: csd a5, 8(csp) -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, cs3 -; PURECAP-NEXT: cmove ca1, cs0 -; PURECAP-NEXT: ccall __atomic_compare_exchange_16 -; PURECAP-NEXT: cld a5, 8(csp) -; PURECAP-NEXT: cld a4, 0(csp) -; PURECAP-NEXT: bnez a0, .LBB9_7 -; PURECAP-NEXT: .LBB9_2: # %atomicrmw.start -; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: beq a5, s1, .LBB9_4 -; PURECAP-NEXT: # %bb.3: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: slt a0, s1, a5 -; PURECAP-NEXT: j .LBB9_5 -; PURECAP-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: sltu a0, s2, a4 -; PURECAP-NEXT: .LBB9_5: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: mv a2, a4 -; PURECAP-NEXT: mv a3, a5 -; PURECAP-NEXT: bnez a0, .LBB9_1 -; PURECAP-NEXT: # %bb.6: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB9_2 Depth=1 -; PURECAP-NEXT: mv a2, s2 -; PURECAP-NEXT: mv a3, s1 -; PURECAP-NEXT: j .LBB9_1 -; PURECAP-NEXT: .LBB9_7: # %atomicrmw.end -; PURECAP-NEXT: mv a0, a4 -; PURECAP-NEXT: mv a1, a5 -; PURECAP-NEXT: clc cra, 80(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 96 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_max: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -48 -; HYBRID-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s0, 32(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s1, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s2, 16(sp) # 8-byte Folded Spill -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: ld a5, 8(a0) -; HYBRID-NEXT: ld a4, 0(a0) -; HYBRID-NEXT: mv s1, a2 -; HYBRID-NEXT: mv s2, a1 -; HYBRID-NEXT: j .LBB9_2 -; HYBRID-NEXT: .LBB9_1: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: sd a4, 0(sp) -; HYBRID-NEXT: sd a5, 8(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, s0 -; HYBRID-NEXT: call __atomic_compare_exchange_16@plt -; HYBRID-NEXT: ld a5, 8(sp) -; HYBRID-NEXT: ld a4, 0(sp) -; HYBRID-NEXT: bnez a0, .LBB9_7 -; HYBRID-NEXT: .LBB9_2: # %atomicrmw.start -; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: beq a5, s1, .LBB9_4 -; HYBRID-NEXT: # %bb.3: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: slt a0, s1, a5 -; HYBRID-NEXT: j .LBB9_5 -; HYBRID-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: sltu a0, s2, a4 -; HYBRID-NEXT: .LBB9_5: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: mv a2, a4 -; HYBRID-NEXT: mv a3, a5 -; HYBRID-NEXT: bnez a0, .LBB9_1 -; HYBRID-NEXT: # %bb.6: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB9_2 Depth=1 -; HYBRID-NEXT: mv a2, s2 -; HYBRID-NEXT: mv a3, s1 -; HYBRID-NEXT: j .LBB9_1 -; HYBRID-NEXT: .LBB9_7: # %atomicrmw.end -; HYBRID-NEXT: mv a0, a4 -; HYBRID-NEXT: mv a1, a5 -; HYBRID-NEXT: ld ra, 40(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s0, 32(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s1, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s2, 16(sp) # 8-byte Folded Reload -; 
HYBRID-NEXT: addi sp, sp, 48 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_max: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: j .LBB9_2 +; PURECAP-ATOMICS-NEXT: .LBB9_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB9_8: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB9_10 +; PURECAP-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_8 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB9_8 +; PURECAP-ATOMICS-NEXT: .LBB9_10: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB9_7 +; PURECAP-ATOMICS-NEXT: .LBB9_2: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB9_8 Depth 2 +; PURECAP-ATOMICS-NEXT: beq a3, a2, .LBB9_4 +; PURECAP-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: slt a6, a2, a3 +; PURECAP-ATOMICS-NEXT: j .LBB9_5 +; PURECAP-ATOMICS-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a6, a1, a4 +; PURECAP-ATOMICS-NEXT: .LBB9_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a3 +; PURECAP-ATOMICS-NEXT: mv a7, a4 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB9_1 +; PURECAP-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a2 +; PURECAP-ATOMICS-NEXT: mv a7, a1 +; PURECAP-ATOMICS-NEXT: j .LBB9_1 +; PURECAP-ATOMICS-NEXT: .LBB9_7: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_max: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -96 +; PURECAP-LIBCALLS-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs3, ca0 +; PURECAP-LIBCALLS-NEXT: cld a5, 8(ca0) +; PURECAP-LIBCALLS-NEXT: cld a4, 0(ca0) +; PURECAP-LIBCALLS-NEXT: mv s1, a2 +; PURECAP-LIBCALLS-NEXT: mv s2, a1 +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds cs0, ca0, 16 +; PURECAP-LIBCALLS-NEXT: j .LBB9_2 +; PURECAP-LIBCALLS-NEXT: .LBB9_1: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: csd a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: csd a5, 8(csp) +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs3 +; PURECAP-LIBCALLS-NEXT: cmove ca1, cs0 +; 
PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-LIBCALLS-NEXT: cld a5, 8(csp) +; PURECAP-LIBCALLS-NEXT: cld a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB9_7 +; PURECAP-LIBCALLS-NEXT: .LBB9_2: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-LIBCALLS-NEXT: beq a5, s1, .LBB9_4 +; PURECAP-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: slt a0, s1, a5 +; PURECAP-LIBCALLS-NEXT: j .LBB9_5 +; PURECAP-LIBCALLS-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s2, a4 +; PURECAP-LIBCALLS-NEXT: .LBB9_5: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, a4 +; PURECAP-LIBCALLS-NEXT: mv a3, a5 +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB9_1 +; PURECAP-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, s2 +; PURECAP-LIBCALLS-NEXT: mv a3, s1 +; PURECAP-LIBCALLS-NEXT: j .LBB9_1 +; PURECAP-LIBCALLS-NEXT: .LBB9_7: # %atomicrmw.end +; PURECAP-LIBCALLS-NEXT: mv a0, a4 +; PURECAP-LIBCALLS-NEXT: mv a1, a5 +; PURECAP-LIBCALLS-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 96 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_max: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: j .LBB9_2 +; HYBRID-ATOMICS-NEXT: .LBB9_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB9_8: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB9_10 +; HYBRID-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_8 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB9_8 +; HYBRID-ATOMICS-NEXT: .LBB9_10: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB9_7 +; HYBRID-ATOMICS-NEXT: .LBB9_2: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB9_8 Depth 2 +; HYBRID-ATOMICS-NEXT: beq a3, a2, .LBB9_4 +; HYBRID-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: slt a6, a2, a3 +; HYBRID-ATOMICS-NEXT: j .LBB9_5 +; HYBRID-ATOMICS-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a6, a1, a4 +; HYBRID-ATOMICS-NEXT: .LBB9_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a3 +; HYBRID-ATOMICS-NEXT: mv a7, a4 +; HYBRID-ATOMICS-NEXT: 
bnez a6, .LBB9_1 +; HYBRID-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a2 +; HYBRID-ATOMICS-NEXT: mv a7, a1 +; HYBRID-ATOMICS-NEXT: j .LBB9_1 +; HYBRID-ATOMICS-NEXT: .LBB9_7: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_max: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -48 +; HYBRID-LIBCALLS-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 32(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s1, 24(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s2, 16(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: ld a5, 8(a0) +; HYBRID-LIBCALLS-NEXT: ld a4, 0(a0) +; HYBRID-LIBCALLS-NEXT: mv s1, a2 +; HYBRID-LIBCALLS-NEXT: mv s2, a1 +; HYBRID-LIBCALLS-NEXT: j .LBB9_2 +; HYBRID-LIBCALLS-NEXT: .LBB9_1: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sd a4, 0(sp) +; HYBRID-LIBCALLS-NEXT: sd a5, 8(sp) +; HYBRID-LIBCALLS-NEXT: mv a1, sp +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-LIBCALLS-NEXT: ld a5, 8(sp) +; HYBRID-LIBCALLS-NEXT: ld a4, 0(sp) +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB9_7 +; HYBRID-LIBCALLS-NEXT: .LBB9_2: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-LIBCALLS-NEXT: beq a5, s1, .LBB9_4 +; HYBRID-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: slt a0, s1, a5 +; HYBRID-LIBCALLS-NEXT: j .LBB9_5 +; HYBRID-LIBCALLS-NEXT: .LBB9_4: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s2, a4 +; HYBRID-LIBCALLS-NEXT: .LBB9_5: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, a4 +; HYBRID-LIBCALLS-NEXT: mv a3, a5 +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB9_1 +; HYBRID-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB9_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, s2 +; HYBRID-LIBCALLS-NEXT: mv a3, s1 +; HYBRID-LIBCALLS-NEXT: j .LBB9_1 +; HYBRID-LIBCALLS-NEXT: .LBB9_7: # %atomicrmw.end +; HYBRID-LIBCALLS-NEXT: mv a0, a4 +; HYBRID-LIBCALLS-NEXT: mv a1, a5 +; HYBRID-LIBCALLS-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 48 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_max: ; HYBRID-CAP-PTR: # %bb.0: @@ -650,22 +1427,33 @@ define i128 @atomic_max(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_max ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) -; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 ; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] ; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; 
PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp sgt i128 [[LOADED]], [[VAL]] -; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i128 [[LOADED]], ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 -; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = icmp sgt i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i128 [[LOADED]], i128 [[VAL]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP3]], i64 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i128 [[TMP8]] to i64 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP7]], i64 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i64 [[TMP15]] to i128 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i128 [[TMP17]], 64 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i128 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i128, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP21]], 0 ; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] ; PURECAP-IR: atomicrmw.end: ; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] @@ -697,119 +1485,225 @@ define i128 @atomic_max(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_min(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_min: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -96 -; PURECAP-NEXT: csc cra, 80(csp) # 16-byte Folded Spill 
-; PURECAP-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill -; PURECAP-NEXT: cmove cs3, ca0 -; PURECAP-NEXT: cld a5, 8(ca0) -; PURECAP-NEXT: cld a4, 0(ca0) -; PURECAP-NEXT: mv s1, a2 -; PURECAP-NEXT: mv s2, a1 -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds cs0, ca0, 16 -; PURECAP-NEXT: j .LBB10_2 -; PURECAP-NEXT: .LBB10_1: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: csd a4, 0(csp) -; PURECAP-NEXT: csd a5, 8(csp) -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, cs3 -; PURECAP-NEXT: cmove ca1, cs0 -; PURECAP-NEXT: ccall __atomic_compare_exchange_16 -; PURECAP-NEXT: cld a5, 8(csp) -; PURECAP-NEXT: cld a4, 0(csp) -; PURECAP-NEXT: bnez a0, .LBB10_7 -; PURECAP-NEXT: .LBB10_2: # %atomicrmw.start -; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: beq a5, s1, .LBB10_4 -; PURECAP-NEXT: # %bb.3: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: slt a0, s1, a5 -; PURECAP-NEXT: j .LBB10_5 -; PURECAP-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: sltu a0, s2, a4 -; PURECAP-NEXT: .LBB10_5: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: xori a0, a0, 1 -; PURECAP-NEXT: mv a2, a4 -; PURECAP-NEXT: mv a3, a5 -; PURECAP-NEXT: bnez a0, .LBB10_1 -; PURECAP-NEXT: # %bb.6: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB10_2 Depth=1 -; PURECAP-NEXT: mv a2, s2 -; PURECAP-NEXT: mv a3, s1 -; PURECAP-NEXT: j .LBB10_1 -; PURECAP-NEXT: .LBB10_7: # %atomicrmw.end -; PURECAP-NEXT: mv a0, a4 -; PURECAP-NEXT: mv a1, a5 -; PURECAP-NEXT: clc cra, 80(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 96 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_min: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -48 -; HYBRID-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s0, 32(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s1, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s2, 16(sp) # 8-byte Folded Spill -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: ld a5, 8(a0) -; HYBRID-NEXT: ld a4, 0(a0) -; HYBRID-NEXT: mv s1, a2 -; HYBRID-NEXT: mv s2, a1 -; HYBRID-NEXT: j .LBB10_2 -; HYBRID-NEXT: .LBB10_1: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: sd a4, 0(sp) -; HYBRID-NEXT: sd a5, 8(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, s0 -; HYBRID-NEXT: call __atomic_compare_exchange_16@plt -; HYBRID-NEXT: ld a5, 8(sp) -; HYBRID-NEXT: ld a4, 0(sp) -; HYBRID-NEXT: bnez a0, .LBB10_7 -; HYBRID-NEXT: .LBB10_2: # %atomicrmw.start -; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: beq a5, s1, .LBB10_4 -; HYBRID-NEXT: # %bb.3: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: slt a0, s1, a5 -; HYBRID-NEXT: j .LBB10_5 -; HYBRID-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: sltu a0, s2, a4 -; HYBRID-NEXT: .LBB10_5: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: xori a0, a0, 1 -; HYBRID-NEXT: mv a2, a4 -; HYBRID-NEXT: mv a3, a5 
-; HYBRID-NEXT: bnez a0, .LBB10_1 -; HYBRID-NEXT: # %bb.6: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB10_2 Depth=1 -; HYBRID-NEXT: mv a2, s2 -; HYBRID-NEXT: mv a3, s1 -; HYBRID-NEXT: j .LBB10_1 -; HYBRID-NEXT: .LBB10_7: # %atomicrmw.end -; HYBRID-NEXT: mv a0, a4 -; HYBRID-NEXT: mv a1, a5 -; HYBRID-NEXT: ld ra, 40(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s0, 32(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s1, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s2, 16(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 48 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_min: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: j .LBB10_2 +; PURECAP-ATOMICS-NEXT: .LBB10_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB10_8: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB10_10 +; PURECAP-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_8 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB10_8 +; PURECAP-ATOMICS-NEXT: .LBB10_10: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB10_7 +; PURECAP-ATOMICS-NEXT: .LBB10_2: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB10_8 Depth 2 +; PURECAP-ATOMICS-NEXT: beq a3, a2, .LBB10_4 +; PURECAP-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: slt a5, a2, a3 +; PURECAP-ATOMICS-NEXT: j .LBB10_5 +; PURECAP-ATOMICS-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a5, a1, a4 +; PURECAP-ATOMICS-NEXT: .LBB10_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: xori a6, a5, 1 +; PURECAP-ATOMICS-NEXT: mv a5, a3 +; PURECAP-ATOMICS-NEXT: mv a7, a4 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB10_1 +; PURECAP-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a2 +; PURECAP-ATOMICS-NEXT: mv a7, a1 +; PURECAP-ATOMICS-NEXT: j .LBB10_1 +; PURECAP-ATOMICS-NEXT: .LBB10_7: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_min: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -96 +; PURECAP-LIBCALLS-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs3, ca0 +; PURECAP-LIBCALLS-NEXT: cld a5, 8(ca0) +; PURECAP-LIBCALLS-NEXT: cld a4, 
0(ca0) +; PURECAP-LIBCALLS-NEXT: mv s1, a2 +; PURECAP-LIBCALLS-NEXT: mv s2, a1 +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds cs0, ca0, 16 +; PURECAP-LIBCALLS-NEXT: j .LBB10_2 +; PURECAP-LIBCALLS-NEXT: .LBB10_1: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: csd a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: csd a5, 8(csp) +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs3 +; PURECAP-LIBCALLS-NEXT: cmove ca1, cs0 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-LIBCALLS-NEXT: cld a5, 8(csp) +; PURECAP-LIBCALLS-NEXT: cld a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB10_7 +; PURECAP-LIBCALLS-NEXT: .LBB10_2: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-LIBCALLS-NEXT: beq a5, s1, .LBB10_4 +; PURECAP-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: slt a0, s1, a5 +; PURECAP-LIBCALLS-NEXT: j .LBB10_5 +; PURECAP-LIBCALLS-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s2, a4 +; PURECAP-LIBCALLS-NEXT: .LBB10_5: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: xori a0, a0, 1 +; PURECAP-LIBCALLS-NEXT: mv a2, a4 +; PURECAP-LIBCALLS-NEXT: mv a3, a5 +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB10_1 +; PURECAP-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, s2 +; PURECAP-LIBCALLS-NEXT: mv a3, s1 +; PURECAP-LIBCALLS-NEXT: j .LBB10_1 +; PURECAP-LIBCALLS-NEXT: .LBB10_7: # %atomicrmw.end +; PURECAP-LIBCALLS-NEXT: mv a0, a4 +; PURECAP-LIBCALLS-NEXT: mv a1, a5 +; PURECAP-LIBCALLS-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 96 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_min: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: j .LBB10_2 +; HYBRID-ATOMICS-NEXT: .LBB10_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB10_8: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB10_10 +; HYBRID-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_8 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB10_8 +; HYBRID-ATOMICS-NEXT: .LBB10_10: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB10_7 +; HYBRID-ATOMICS-NEXT: .LBB10_2: # %atomicrmw.start +; 
HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB10_8 Depth 2 +; HYBRID-ATOMICS-NEXT: beq a3, a2, .LBB10_4 +; HYBRID-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: slt a5, a2, a3 +; HYBRID-ATOMICS-NEXT: j .LBB10_5 +; HYBRID-ATOMICS-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a5, a1, a4 +; HYBRID-ATOMICS-NEXT: .LBB10_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: xori a6, a5, 1 +; HYBRID-ATOMICS-NEXT: mv a5, a3 +; HYBRID-ATOMICS-NEXT: mv a7, a4 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB10_1 +; HYBRID-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a2 +; HYBRID-ATOMICS-NEXT: mv a7, a1 +; HYBRID-ATOMICS-NEXT: j .LBB10_1 +; HYBRID-ATOMICS-NEXT: .LBB10_7: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_min: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -48 +; HYBRID-LIBCALLS-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 32(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s1, 24(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s2, 16(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: ld a5, 8(a0) +; HYBRID-LIBCALLS-NEXT: ld a4, 0(a0) +; HYBRID-LIBCALLS-NEXT: mv s1, a2 +; HYBRID-LIBCALLS-NEXT: mv s2, a1 +; HYBRID-LIBCALLS-NEXT: j .LBB10_2 +; HYBRID-LIBCALLS-NEXT: .LBB10_1: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sd a4, 0(sp) +; HYBRID-LIBCALLS-NEXT: sd a5, 8(sp) +; HYBRID-LIBCALLS-NEXT: mv a1, sp +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-LIBCALLS-NEXT: ld a5, 8(sp) +; HYBRID-LIBCALLS-NEXT: ld a4, 0(sp) +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB10_7 +; HYBRID-LIBCALLS-NEXT: .LBB10_2: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-LIBCALLS-NEXT: beq a5, s1, .LBB10_4 +; HYBRID-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: slt a0, s1, a5 +; HYBRID-LIBCALLS-NEXT: j .LBB10_5 +; HYBRID-LIBCALLS-NEXT: .LBB10_4: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s2, a4 +; HYBRID-LIBCALLS-NEXT: .LBB10_5: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: xori a0, a0, 1 +; HYBRID-LIBCALLS-NEXT: mv a2, a4 +; HYBRID-LIBCALLS-NEXT: mv a3, a5 +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB10_1 +; HYBRID-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB10_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, s2 +; HYBRID-LIBCALLS-NEXT: mv a3, s1 +; HYBRID-LIBCALLS-NEXT: j .LBB10_1 +; HYBRID-LIBCALLS-NEXT: .LBB10_7: # %atomicrmw.end +; HYBRID-LIBCALLS-NEXT: mv a0, a4 +; HYBRID-LIBCALLS-NEXT: mv a1, a5 +; HYBRID-LIBCALLS-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 48 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_min: 
; HYBRID-CAP-PTR: # %bb.0: @@ -866,22 +1760,33 @@ define i128 @atomic_min(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_min ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) -; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 ; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] ; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp sle i128 [[LOADED]], [[VAL]] -; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i128 [[LOADED]], ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 -; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = icmp sle i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i128 [[LOADED]], i128 [[VAL]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP3]], i64 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i128 [[TMP8]] to i64 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP7]], i64 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i64 [[TMP15]] to i128 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i128 [[TMP17]], 64 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i128 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } undef, i128 
[[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i128, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP21]], 0 ; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] ; PURECAP-IR: atomicrmw.end: ; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] @@ -913,117 +1818,221 @@ define i128 @atomic_min(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_umax(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_umax: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -96 -; PURECAP-NEXT: csc cra, 80(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill -; PURECAP-NEXT: cmove cs3, ca0 -; PURECAP-NEXT: cld a5, 8(ca0) -; PURECAP-NEXT: cld a4, 0(ca0) -; PURECAP-NEXT: mv s1, a2 -; PURECAP-NEXT: mv s2, a1 -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds cs0, ca0, 16 -; PURECAP-NEXT: j .LBB11_2 -; PURECAP-NEXT: .LBB11_1: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: csd a4, 0(csp) -; PURECAP-NEXT: csd a5, 8(csp) -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, cs3 -; PURECAP-NEXT: cmove ca1, cs0 -; PURECAP-NEXT: ccall __atomic_compare_exchange_16 -; PURECAP-NEXT: cld a5, 8(csp) -; PURECAP-NEXT: cld a4, 0(csp) -; PURECAP-NEXT: bnez a0, .LBB11_7 -; PURECAP-NEXT: .LBB11_2: # %atomicrmw.start -; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: beq a5, s1, .LBB11_4 -; PURECAP-NEXT: # %bb.3: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: sltu a0, s1, a5 -; PURECAP-NEXT: j .LBB11_5 -; PURECAP-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: sltu a0, s2, a4 -; PURECAP-NEXT: .LBB11_5: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: mv a2, a4 -; PURECAP-NEXT: mv a3, a5 -; PURECAP-NEXT: bnez a0, .LBB11_1 -; PURECAP-NEXT: # %bb.6: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB11_2 Depth=1 -; PURECAP-NEXT: mv a2, s2 -; PURECAP-NEXT: mv a3, s1 -; PURECAP-NEXT: j .LBB11_1 -; PURECAP-NEXT: .LBB11_7: # %atomicrmw.end -; PURECAP-NEXT: mv a0, a4 -; PURECAP-NEXT: mv a1, a5 -; PURECAP-NEXT: clc cra, 80(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 96 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_umax: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -48 -; HYBRID-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s0, 32(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s1, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s2, 16(sp) # 8-byte Folded Spill -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: ld a5, 8(a0) -; HYBRID-NEXT: ld a4, 0(a0) -; HYBRID-NEXT: mv s1, a2 -; HYBRID-NEXT: mv s2, a1 -; HYBRID-NEXT: j .LBB11_2 -; HYBRID-NEXT: .LBB11_1: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: sd a4, 0(sp) -; HYBRID-NEXT: sd a5, 8(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv 
a0, s0 -; HYBRID-NEXT: call __atomic_compare_exchange_16@plt -; HYBRID-NEXT: ld a5, 8(sp) -; HYBRID-NEXT: ld a4, 0(sp) -; HYBRID-NEXT: bnez a0, .LBB11_7 -; HYBRID-NEXT: .LBB11_2: # %atomicrmw.start -; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: beq a5, s1, .LBB11_4 -; HYBRID-NEXT: # %bb.3: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: sltu a0, s1, a5 -; HYBRID-NEXT: j .LBB11_5 -; HYBRID-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: sltu a0, s2, a4 -; HYBRID-NEXT: .LBB11_5: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: mv a2, a4 -; HYBRID-NEXT: mv a3, a5 -; HYBRID-NEXT: bnez a0, .LBB11_1 -; HYBRID-NEXT: # %bb.6: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB11_2 Depth=1 -; HYBRID-NEXT: mv a2, s2 -; HYBRID-NEXT: mv a3, s1 -; HYBRID-NEXT: j .LBB11_1 -; HYBRID-NEXT: .LBB11_7: # %atomicrmw.end -; HYBRID-NEXT: mv a0, a4 -; HYBRID-NEXT: mv a1, a5 -; HYBRID-NEXT: ld ra, 40(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s0, 32(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s1, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s2, 16(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 48 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_umax: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: j .LBB11_2 +; PURECAP-ATOMICS-NEXT: .LBB11_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB11_8: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB11_10 +; PURECAP-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_8 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB11_8 +; PURECAP-ATOMICS-NEXT: .LBB11_10: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB11_7 +; PURECAP-ATOMICS-NEXT: .LBB11_2: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB11_8 Depth 2 +; PURECAP-ATOMICS-NEXT: beq a3, a2, .LBB11_4 +; PURECAP-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a6, a2, a3 +; PURECAP-ATOMICS-NEXT: j .LBB11_5 +; PURECAP-ATOMICS-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a6, a1, a4 +; PURECAP-ATOMICS-NEXT: .LBB11_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a3 +; PURECAP-ATOMICS-NEXT: mv a7, a4 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB11_1 +; PURECAP-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a2 +; PURECAP-ATOMICS-NEXT: mv a7, a1 +; PURECAP-ATOMICS-NEXT: j .LBB11_1 +; PURECAP-ATOMICS-NEXT: .LBB11_7: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 
+; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_umax: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -96 +; PURECAP-LIBCALLS-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs3, ca0 +; PURECAP-LIBCALLS-NEXT: cld a5, 8(ca0) +; PURECAP-LIBCALLS-NEXT: cld a4, 0(ca0) +; PURECAP-LIBCALLS-NEXT: mv s1, a2 +; PURECAP-LIBCALLS-NEXT: mv s2, a1 +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds cs0, ca0, 16 +; PURECAP-LIBCALLS-NEXT: j .LBB11_2 +; PURECAP-LIBCALLS-NEXT: .LBB11_1: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: csd a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: csd a5, 8(csp) +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs3 +; PURECAP-LIBCALLS-NEXT: cmove ca1, cs0 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-LIBCALLS-NEXT: cld a5, 8(csp) +; PURECAP-LIBCALLS-NEXT: cld a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB11_7 +; PURECAP-LIBCALLS-NEXT: .LBB11_2: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-LIBCALLS-NEXT: beq a5, s1, .LBB11_4 +; PURECAP-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s1, a5 +; PURECAP-LIBCALLS-NEXT: j .LBB11_5 +; PURECAP-LIBCALLS-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s2, a4 +; PURECAP-LIBCALLS-NEXT: .LBB11_5: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, a4 +; PURECAP-LIBCALLS-NEXT: mv a3, a5 +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB11_1 +; PURECAP-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, s2 +; PURECAP-LIBCALLS-NEXT: mv a3, s1 +; PURECAP-LIBCALLS-NEXT: j .LBB11_1 +; PURECAP-LIBCALLS-NEXT: .LBB11_7: # %atomicrmw.end +; PURECAP-LIBCALLS-NEXT: mv a0, a4 +; PURECAP-LIBCALLS-NEXT: mv a1, a5 +; PURECAP-LIBCALLS-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 96 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_umax: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: j .LBB11_2 +; HYBRID-ATOMICS-NEXT: .LBB11_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB11_8: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; 
HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB11_10 +; HYBRID-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_8 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB11_8 +; HYBRID-ATOMICS-NEXT: .LBB11_10: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB11_7 +; HYBRID-ATOMICS-NEXT: .LBB11_2: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB11_8 Depth 2 +; HYBRID-ATOMICS-NEXT: beq a3, a2, .LBB11_4 +; HYBRID-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a6, a2, a3 +; HYBRID-ATOMICS-NEXT: j .LBB11_5 +; HYBRID-ATOMICS-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a6, a1, a4 +; HYBRID-ATOMICS-NEXT: .LBB11_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a3 +; HYBRID-ATOMICS-NEXT: mv a7, a4 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB11_1 +; HYBRID-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a2 +; HYBRID-ATOMICS-NEXT: mv a7, a1 +; HYBRID-ATOMICS-NEXT: j .LBB11_1 +; HYBRID-ATOMICS-NEXT: .LBB11_7: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_umax: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -48 +; HYBRID-LIBCALLS-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 32(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s1, 24(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s2, 16(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: ld a5, 8(a0) +; HYBRID-LIBCALLS-NEXT: ld a4, 0(a0) +; HYBRID-LIBCALLS-NEXT: mv s1, a2 +; HYBRID-LIBCALLS-NEXT: mv s2, a1 +; HYBRID-LIBCALLS-NEXT: j .LBB11_2 +; HYBRID-LIBCALLS-NEXT: .LBB11_1: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sd a4, 0(sp) +; HYBRID-LIBCALLS-NEXT: sd a5, 8(sp) +; HYBRID-LIBCALLS-NEXT: mv a1, sp +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-LIBCALLS-NEXT: ld a5, 8(sp) +; HYBRID-LIBCALLS-NEXT: ld a4, 0(sp) +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB11_7 +; HYBRID-LIBCALLS-NEXT: .LBB11_2: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; HYBRID-LIBCALLS-NEXT: beq a5, s1, .LBB11_4 +; HYBRID-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s1, a5 +; HYBRID-LIBCALLS-NEXT: j .LBB11_5 +; HYBRID-LIBCALLS-NEXT: .LBB11_4: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s2, a4 +; HYBRID-LIBCALLS-NEXT: .LBB11_5: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, a4 +; HYBRID-LIBCALLS-NEXT: mv a3, a5 +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB11_1 +; HYBRID-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB11_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, s2 +; 
HYBRID-LIBCALLS-NEXT: mv a3, s1 +; HYBRID-LIBCALLS-NEXT: j .LBB11_1 +; HYBRID-LIBCALLS-NEXT: .LBB11_7: # %atomicrmw.end +; HYBRID-LIBCALLS-NEXT: mv a0, a4 +; HYBRID-LIBCALLS-NEXT: mv a1, a5 +; HYBRID-LIBCALLS-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 48 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_umax: ; HYBRID-CAP-PTR: # %bb.0: @@ -1079,22 +2088,33 @@ define i128 @atomic_umax(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umax ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) -; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 ; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] ; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp ugt i128 [[LOADED]], [[VAL]] -; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i128 [[LOADED]], ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 -; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = icmp ugt i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i128 [[LOADED]], i128 [[VAL]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP3]], i64 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i128 [[TMP8]] to i64 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP7]], i64 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 
} [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i64 [[TMP15]] to i128 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i128 [[TMP17]], 64 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i128 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i128, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP21]], 0 ; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] ; PURECAP-IR: atomicrmw.end: ; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] @@ -1126,119 +2146,225 @@ define i128 @atomic_umax(ptr addrspace(200) %ptr, i128 %val) nounwind { } define i128 @atomic_umin(ptr addrspace(200) %ptr, i128 %val) nounwind { -; PURECAP-LABEL: atomic_umin: -; PURECAP: # %bb.0: -; PURECAP-NEXT: cincoffset csp, csp, -96 -; PURECAP-NEXT: csc cra, 80(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill -; PURECAP-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill -; PURECAP-NEXT: cmove cs3, ca0 -; PURECAP-NEXT: cld a5, 8(ca0) -; PURECAP-NEXT: cld a4, 0(ca0) -; PURECAP-NEXT: mv s1, a2 -; PURECAP-NEXT: mv s2, a1 -; PURECAP-NEXT: cincoffset ca0, csp, 0 -; PURECAP-NEXT: csetbounds cs0, ca0, 16 -; PURECAP-NEXT: j .LBB12_2 -; PURECAP-NEXT: .LBB12_1: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: csd a4, 0(csp) -; PURECAP-NEXT: csd a5, 8(csp) -; PURECAP-NEXT: li a4, 5 -; PURECAP-NEXT: li a5, 5 -; PURECAP-NEXT: cmove ca0, cs3 -; PURECAP-NEXT: cmove ca1, cs0 -; PURECAP-NEXT: ccall __atomic_compare_exchange_16 -; PURECAP-NEXT: cld a5, 8(csp) -; PURECAP-NEXT: cld a4, 0(csp) -; PURECAP-NEXT: bnez a0, .LBB12_7 -; PURECAP-NEXT: .LBB12_2: # %atomicrmw.start -; PURECAP-NEXT: # =>This Inner Loop Header: Depth=1 -; PURECAP-NEXT: beq a5, s1, .LBB12_4 -; PURECAP-NEXT: # %bb.3: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: sltu a0, s1, a5 -; PURECAP-NEXT: j .LBB12_5 -; PURECAP-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: sltu a0, s2, a4 -; PURECAP-NEXT: .LBB12_5: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: xori a0, a0, 1 -; PURECAP-NEXT: mv a2, a4 -; PURECAP-NEXT: mv a3, a5 -; PURECAP-NEXT: bnez a0, .LBB12_1 -; PURECAP-NEXT: # %bb.6: # %atomicrmw.start -; PURECAP-NEXT: # in Loop: Header=BB12_2 Depth=1 -; PURECAP-NEXT: mv a2, s2 -; PURECAP-NEXT: mv a3, s1 -; PURECAP-NEXT: j .LBB12_1 -; PURECAP-NEXT: .LBB12_7: # %atomicrmw.end -; PURECAP-NEXT: mv a0, a4 -; PURECAP-NEXT: mv a1, a5 -; PURECAP-NEXT: clc cra, 80(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload -; PURECAP-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload -; PURECAP-NEXT: cincoffset csp, csp, 96 -; PURECAP-NEXT: cret -; -; HYBRID-LABEL: atomic_umin: -; HYBRID: # %bb.0: -; HYBRID-NEXT: addi sp, sp, -48 -; HYBRID-NEXT: sd ra, 40(sp) # 
8-byte Folded Spill -; HYBRID-NEXT: sd s0, 32(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s1, 24(sp) # 8-byte Folded Spill -; HYBRID-NEXT: sd s2, 16(sp) # 8-byte Folded Spill -; HYBRID-NEXT: mv s0, a0 -; HYBRID-NEXT: ld a5, 8(a0) -; HYBRID-NEXT: ld a4, 0(a0) -; HYBRID-NEXT: mv s1, a2 -; HYBRID-NEXT: mv s2, a1 -; HYBRID-NEXT: j .LBB12_2 -; HYBRID-NEXT: .LBB12_1: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: sd a4, 0(sp) -; HYBRID-NEXT: sd a5, 8(sp) -; HYBRID-NEXT: mv a1, sp -; HYBRID-NEXT: li a4, 5 -; HYBRID-NEXT: li a5, 5 -; HYBRID-NEXT: mv a0, s0 -; HYBRID-NEXT: call __atomic_compare_exchange_16@plt -; HYBRID-NEXT: ld a5, 8(sp) -; HYBRID-NEXT: ld a4, 0(sp) -; HYBRID-NEXT: bnez a0, .LBB12_7 -; HYBRID-NEXT: .LBB12_2: # %atomicrmw.start -; HYBRID-NEXT: # =>This Inner Loop Header: Depth=1 -; HYBRID-NEXT: beq a5, s1, .LBB12_4 -; HYBRID-NEXT: # %bb.3: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: sltu a0, s1, a5 -; HYBRID-NEXT: j .LBB12_5 -; HYBRID-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: sltu a0, s2, a4 -; HYBRID-NEXT: .LBB12_5: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: xori a0, a0, 1 -; HYBRID-NEXT: mv a2, a4 -; HYBRID-NEXT: mv a3, a5 -; HYBRID-NEXT: bnez a0, .LBB12_1 -; HYBRID-NEXT: # %bb.6: # %atomicrmw.start -; HYBRID-NEXT: # in Loop: Header=BB12_2 Depth=1 -; HYBRID-NEXT: mv a2, s2 -; HYBRID-NEXT: mv a3, s1 -; HYBRID-NEXT: j .LBB12_1 -; HYBRID-NEXT: .LBB12_7: # %atomicrmw.end -; HYBRID-NEXT: mv a0, a4 -; HYBRID-NEXT: mv a1, a5 -; HYBRID-NEXT: ld ra, 40(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s0, 32(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s1, 24(sp) # 8-byte Folded Reload -; HYBRID-NEXT: ld s2, 16(sp) # 8-byte Folded Reload -; HYBRID-NEXT: addi sp, sp, 48 -; HYBRID-NEXT: ret +; PURECAP-ATOMICS-LABEL: atomic_umin: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) +; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) +; PURECAP-ATOMICS-NEXT: j .LBB12_2 +; PURECAP-ATOMICS-NEXT: .LBB12_1: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; PURECAP-ATOMICS-NEXT: .LBB12_8: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # Parent Loop BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: beqz a6, .LBB12_10 +; PURECAP-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_8 Depth=2 +; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB12_8 +; PURECAP-ATOMICS-NEXT: .LBB12_10: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; PURECAP-ATOMICS-NEXT: mv a4, a5 +; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB12_7 +; PURECAP-ATOMICS-NEXT: .LBB12_2: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; PURECAP-ATOMICS-NEXT: # Child Loop BB12_8 Depth 2 +; PURECAP-ATOMICS-NEXT: beq a3, a2, .LBB12_4 +; PURECAP-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a5, a2, a3 +; PURECAP-ATOMICS-NEXT: j .LBB12_5 +; 
PURECAP-ATOMICS-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: sltu a5, a1, a4 +; PURECAP-ATOMICS-NEXT: .LBB12_5: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: xori a6, a5, 1 +; PURECAP-ATOMICS-NEXT: mv a5, a3 +; PURECAP-ATOMICS-NEXT: mv a7, a4 +; PURECAP-ATOMICS-NEXT: bnez a6, .LBB12_1 +; PURECAP-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-ATOMICS-NEXT: mv a5, a2 +; PURECAP-ATOMICS-NEXT: mv a7, a1 +; PURECAP-ATOMICS-NEXT: j .LBB12_1 +; PURECAP-ATOMICS-NEXT: .LBB12_7: # %atomicrmw.end +; PURECAP-ATOMICS-NEXT: mv a0, a4 +; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: atomic_umin: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -96 +; PURECAP-LIBCALLS-NEXT: csc cra, 80(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 64(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs1, 48(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs2, 32(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs3, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs3, ca0 +; PURECAP-LIBCALLS-NEXT: cld a5, 8(ca0) +; PURECAP-LIBCALLS-NEXT: cld a4, 0(ca0) +; PURECAP-LIBCALLS-NEXT: mv s1, a2 +; PURECAP-LIBCALLS-NEXT: mv s2, a1 +; PURECAP-LIBCALLS-NEXT: cincoffset ca0, csp, 0 +; PURECAP-LIBCALLS-NEXT: csetbounds cs0, ca0, 16 +; PURECAP-LIBCALLS-NEXT: j .LBB12_2 +; PURECAP-LIBCALLS-NEXT: .LBB12_1: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: csd a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: csd a5, 8(csp) +; PURECAP-LIBCALLS-NEXT: li a4, 5 +; PURECAP-LIBCALLS-NEXT: li a5, 5 +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs3 +; PURECAP-LIBCALLS-NEXT: cmove ca1, cs0 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_compare_exchange_16 +; PURECAP-LIBCALLS-NEXT: cld a5, 8(csp) +; PURECAP-LIBCALLS-NEXT: cld a4, 0(csp) +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB12_7 +; PURECAP-LIBCALLS-NEXT: .LBB12_2: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # =>This Inner Loop Header: Depth=1 +; PURECAP-LIBCALLS-NEXT: beq a5, s1, .LBB12_4 +; PURECAP-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s1, a5 +; PURECAP-LIBCALLS-NEXT: j .LBB12_5 +; PURECAP-LIBCALLS-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: sltu a0, s2, a4 +; PURECAP-LIBCALLS-NEXT: .LBB12_5: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: xori a0, a0, 1 +; PURECAP-LIBCALLS-NEXT: mv a2, a4 +; PURECAP-LIBCALLS-NEXT: mv a3, a5 +; PURECAP-LIBCALLS-NEXT: bnez a0, .LBB12_1 +; PURECAP-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; PURECAP-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; PURECAP-LIBCALLS-NEXT: mv a2, s2 +; PURECAP-LIBCALLS-NEXT: mv a3, s1 +; PURECAP-LIBCALLS-NEXT: j .LBB12_1 +; PURECAP-LIBCALLS-NEXT: .LBB12_7: # %atomicrmw.end +; PURECAP-LIBCALLS-NEXT: mv a0, a4 +; PURECAP-LIBCALLS-NEXT: mv a1, a5 +; PURECAP-LIBCALLS-NEXT: clc cra, 80(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 64(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs1, 48(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs2, 32(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs3, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 96 +; 
PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: atomic_umin: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) +; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) +; HYBRID-ATOMICS-NEXT: j .LBB12_2 +; HYBRID-ATOMICS-NEXT: .LBB12_1: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 +; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a7 +; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a5 +; HYBRID-ATOMICS-NEXT: .LBB12_8: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # Parent Loop BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 +; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: beqz a6, .LBB12_10 +; HYBRID-ATOMICS-NEXT: # %bb.9: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_8 Depth=2 +; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB12_8 +; HYBRID-ATOMICS-NEXT: .LBB12_10: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 +; HYBRID-ATOMICS-NEXT: mv a4, a5 +; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB12_7 +; HYBRID-ATOMICS-NEXT: .LBB12_2: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 +; HYBRID-ATOMICS-NEXT: # Child Loop BB12_8 Depth 2 +; HYBRID-ATOMICS-NEXT: beq a3, a2, .LBB12_4 +; HYBRID-ATOMICS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a5, a2, a3 +; HYBRID-ATOMICS-NEXT: j .LBB12_5 +; HYBRID-ATOMICS-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: sltu a5, a1, a4 +; HYBRID-ATOMICS-NEXT: .LBB12_5: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: xori a6, a5, 1 +; HYBRID-ATOMICS-NEXT: mv a5, a3 +; HYBRID-ATOMICS-NEXT: mv a7, a4 +; HYBRID-ATOMICS-NEXT: bnez a6, .LBB12_1 +; HYBRID-ATOMICS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-ATOMICS-NEXT: mv a5, a2 +; HYBRID-ATOMICS-NEXT: mv a7, a1 +; HYBRID-ATOMICS-NEXT: j .LBB12_1 +; HYBRID-ATOMICS-NEXT: .LBB12_7: # %atomicrmw.end +; HYBRID-ATOMICS-NEXT: mv a0, a4 +; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: atomic_umin: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -48 +; HYBRID-LIBCALLS-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 32(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s1, 24(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s2, 16(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a0 +; HYBRID-LIBCALLS-NEXT: ld a5, 8(a0) +; HYBRID-LIBCALLS-NEXT: ld a4, 0(a0) +; HYBRID-LIBCALLS-NEXT: mv s1, a2 +; HYBRID-LIBCALLS-NEXT: mv s2, a1 +; HYBRID-LIBCALLS-NEXT: j .LBB12_2 +; HYBRID-LIBCALLS-NEXT: .LBB12_1: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sd a4, 0(sp) +; HYBRID-LIBCALLS-NEXT: sd a5, 8(sp) +; HYBRID-LIBCALLS-NEXT: mv a1, sp +; HYBRID-LIBCALLS-NEXT: li a4, 5 +; HYBRID-LIBCALLS-NEXT: li a5, 5 +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: call __atomic_compare_exchange_16@plt +; HYBRID-LIBCALLS-NEXT: ld a5, 8(sp) +; HYBRID-LIBCALLS-NEXT: ld a4, 0(sp) +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB12_7 +; HYBRID-LIBCALLS-NEXT: .LBB12_2: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # 
=>This Inner Loop Header: Depth=1 +; HYBRID-LIBCALLS-NEXT: beq a5, s1, .LBB12_4 +; HYBRID-LIBCALLS-NEXT: # %bb.3: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s1, a5 +; HYBRID-LIBCALLS-NEXT: j .LBB12_5 +; HYBRID-LIBCALLS-NEXT: .LBB12_4: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: sltu a0, s2, a4 +; HYBRID-LIBCALLS-NEXT: .LBB12_5: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: xori a0, a0, 1 +; HYBRID-LIBCALLS-NEXT: mv a2, a4 +; HYBRID-LIBCALLS-NEXT: mv a3, a5 +; HYBRID-LIBCALLS-NEXT: bnez a0, .LBB12_1 +; HYBRID-LIBCALLS-NEXT: # %bb.6: # %atomicrmw.start +; HYBRID-LIBCALLS-NEXT: # in Loop: Header=BB12_2 Depth=1 +; HYBRID-LIBCALLS-NEXT: mv a2, s2 +; HYBRID-LIBCALLS-NEXT: mv a3, s1 +; HYBRID-LIBCALLS-NEXT: j .LBB12_1 +; HYBRID-LIBCALLS-NEXT: .LBB12_7: # %atomicrmw.end +; HYBRID-LIBCALLS-NEXT: mv a0, a4 +; HYBRID-LIBCALLS-NEXT: mv a1, a5 +; HYBRID-LIBCALLS-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 48 +; HYBRID-LIBCALLS-NEXT: ret ; ; HYBRID-CAP-PTR-LABEL: atomic_umin: ; HYBRID-CAP-PTR: # %bb.0: @@ -1295,22 +2421,33 @@ define i128 @atomic_umin(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_umin ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = alloca i128, align 16, addrspace(200) -; PURECAP-IR-NEXT: [[TMP2:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 +; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 ; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] ; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = icmp ule i128 [[LOADED]], [[VAL]] -; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i128 [[LOADED]], i128 [[VAL]] -; PURECAP-IR-NEXT: call void @llvm.lifetime.start.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: store i128 [[LOADED]], ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP1]], i128 [[NEW]], i32 5, i32 5) -; PURECAP-IR-NEXT: [[TMP5:%.*]] = load i128, ptr addrspace(200) [[TMP1]], align 16 -; PURECAP-IR-NEXT: call void @llvm.lifetime.end.p200(i64 16, ptr addrspace(200) [[TMP1]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP5]], 0 -; PURECAP-IR-NEXT: [[TMP7:%.*]] = insertvalue { i128, i1 } [[TMP6]], i1 [[TMP4]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP7]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP7]], 0 +; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = icmp ule i128 [[LOADED]], [[VAL]] +; PURECAP-IR-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i128 [[LOADED]], i128 [[VAL]] +; PURECAP-IR-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] +; PURECAP-IR-NEXT: [[TMP4:%.*]] = lshr i128 [[LOADED]], 64 +; PURECAP-IR-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 +; 
PURECAP-IR-NEXT: [[TMP6:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP3]], i64 [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[NEW]] +; PURECAP-IR-NEXT: [[TMP8:%.*]] = lshr i128 [[NEW]], 64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = trunc i128 [[TMP8]] to i64 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP7]], i64 [[TMP9]]) +; PURECAP-IR-NEXT: [[TMP11:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP6]], ptr addrspace(200) [[TMP10]] seq_cst seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 0 +; PURECAP-IR-NEXT: [[TMP13:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP11]], 1 +; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP15:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP12]]) +; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 +; PURECAP-IR-NEXT: [[TMP17:%.*]] = zext i64 [[TMP15]] to i128 +; PURECAP-IR-NEXT: [[TMP18:%.*]] = shl i128 [[TMP17]], 64 +; PURECAP-IR-NEXT: [[TMP19:%.*]] = or i128 [[TMP16]], [[TMP18]] +; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP19]], 0 +; PURECAP-IR-NEXT: [[TMP21:%.*]] = insertvalue { i128, i1 } [[TMP20]], i1 [[TMP13]], 1 +; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP21]], 1 +; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP21]], 0 ; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] ; PURECAP-IR: atomicrmw.end: ; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] diff --git a/llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll index f50774cb8c16..1e04f6f3b825 100644 --- a/llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll @@ -11531,12 +11531,34 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwin ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB165_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB165_3 Depth 2 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 +; RV32IAXCHERI-NEXT: .LBB165_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB165_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB165_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB165_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB165_3 +; RV32IAXCHERI-NEXT: .LBB165_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB165_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; 
RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB165_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_monotonic: @@ -11570,12 +11592,34 @@ define i64 @atomicrmw_xchg_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB166_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB166_3 Depth 2 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 +; RV32IAXCHERI-NEXT: .LBB166_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB166_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB166_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB166_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB166_3 +; RV32IAXCHERI-NEXT: .LBB166_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB166_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB166_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_acquire: @@ -11609,12 +11653,34 @@ define i64 @atomicrmw_xchg_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 3 -; RV32IAXCHERI-NEXT: ccall __atomic_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB167_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB167_3 Depth 2 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 +; RV32IAXCHERI-NEXT: .LBB167_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB167_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB167_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB167_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB167_3 +; RV32IAXCHERI-NEXT: .LBB167_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB167_1 
Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB167_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_release: @@ -11648,12 +11714,34 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 4 -; RV32IAXCHERI-NEXT: ccall __atomic_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB168_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB168_3 Depth 2 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 +; RV32IAXCHERI-NEXT: .LBB168_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB168_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB168_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB168_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB168_3 +; RV32IAXCHERI-NEXT: .LBB168_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB168_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB168_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_acq_rel: @@ -11687,12 +11775,34 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_exchange_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB169_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB169_3 Depth 2 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 +; RV32IAXCHERI-NEXT: .LBB169_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB169_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB169_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB169_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB169_3 +; 
RV32IAXCHERI-NEXT: .LBB169_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB169_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB169_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_seq_cst: @@ -11726,12 +11836,38 @@ define i64 @atomicrmw_add_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_add_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_add_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB170_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB170_3 Depth 2 +; RV32IAXCHERI-NEXT: add a5, a4, a1 +; RV32IAXCHERI-NEXT: sltu a6, a5, a4 +; RV32IAXCHERI-NEXT: add a7, a3, a2 +; RV32IAXCHERI-NEXT: add a6, a7, a6 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB170_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB170_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB170_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB170_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB170_3 +; RV32IAXCHERI-NEXT: .LBB170_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB170_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB170_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_add_i64_monotonic: @@ -11765,12 +11901,38 @@ define i64 @atomicrmw_add_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_add_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_add_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB171_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB171_3 Depth 2 +; RV32IAXCHERI-NEXT: add a5, a4, a1 +; RV32IAXCHERI-NEXT: sltu a6, a5, a4 +; RV32IAXCHERI-NEXT: add a7, a3, a2 +; RV32IAXCHERI-NEXT: add a6, a7, a6 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB171_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB171_1 
Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB171_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB171_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB171_3 +; RV32IAXCHERI-NEXT: .LBB171_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB171_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB171_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_add_i64_acquire: @@ -11804,12 +11966,38 @@ define i64 @atomicrmw_add_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_add_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 3 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_add_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB172_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB172_3 Depth 2 +; RV32IAXCHERI-NEXT: add a5, a4, a1 +; RV32IAXCHERI-NEXT: sltu a6, a5, a4 +; RV32IAXCHERI-NEXT: add a7, a3, a2 +; RV32IAXCHERI-NEXT: add a6, a7, a6 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB172_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB172_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB172_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB172_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB172_3 +; RV32IAXCHERI-NEXT: .LBB172_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB172_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB172_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_add_i64_release: @@ -11843,12 +12031,38 @@ define i64 @atomicrmw_add_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_add_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 4 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_add_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB173_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB173_3 Depth 2 +; RV32IAXCHERI-NEXT: add a5, a4, a1 +; 
RV32IAXCHERI-NEXT: sltu a6, a5, a4 +; RV32IAXCHERI-NEXT: add a7, a3, a2 +; RV32IAXCHERI-NEXT: add a6, a7, a6 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB173_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB173_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB173_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB173_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB173_3 +; RV32IAXCHERI-NEXT: .LBB173_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB173_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB173_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_add_i64_acq_rel: @@ -11882,12 +12096,38 @@ define i64 @atomicrmw_add_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_add_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_add_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB174_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB174_3 Depth 2 +; RV32IAXCHERI-NEXT: add a5, a4, a1 +; RV32IAXCHERI-NEXT: sltu a6, a5, a4 +; RV32IAXCHERI-NEXT: add a7, a3, a2 +; RV32IAXCHERI-NEXT: add a6, a7, a6 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB174_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB174_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB174_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB174_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB174_3 +; RV32IAXCHERI-NEXT: .LBB174_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB174_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB174_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_add_i64_seq_cst: @@ -11921,12 +12161,38 @@ define i64 @atomicrmw_sub_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_sub_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall 
__atomic_fetch_sub_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB175_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB175_3 Depth 2 +; RV32IAXCHERI-NEXT: sltu a5, a4, a1 +; RV32IAXCHERI-NEXT: sub a6, a3, a2 +; RV32IAXCHERI-NEXT: sub a5, a6, a5 +; RV32IAXCHERI-NEXT: sub a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB175_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB175_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB175_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB175_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB175_3 +; RV32IAXCHERI-NEXT: .LBB175_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB175_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB175_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_sub_i64_monotonic: @@ -11961,12 +12227,38 @@ define i64 @atomicrmw_sub_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_sub_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_sub_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB176_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB176_3 Depth 2 +; RV32IAXCHERI-NEXT: sltu a5, a4, a1 +; RV32IAXCHERI-NEXT: sub a6, a3, a2 +; RV32IAXCHERI-NEXT: sub a5, a6, a5 +; RV32IAXCHERI-NEXT: sub a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB176_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB176_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB176_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB176_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB176_3 +; RV32IAXCHERI-NEXT: .LBB176_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB176_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB176_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; 
RV64IXCHERI-LABEL: atomicrmw_sub_i64_acquire: @@ -12001,12 +12293,38 @@ define i64 @atomicrmw_sub_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_sub_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 3 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_sub_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB177_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB177_3 Depth 2 +; RV32IAXCHERI-NEXT: sltu a5, a4, a1 +; RV32IAXCHERI-NEXT: sub a6, a3, a2 +; RV32IAXCHERI-NEXT: sub a5, a6, a5 +; RV32IAXCHERI-NEXT: sub a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB177_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB177_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB177_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB177_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB177_3 +; RV32IAXCHERI-NEXT: .LBB177_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB177_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB177_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_sub_i64_release: @@ -12041,12 +12359,38 @@ define i64 @atomicrmw_sub_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_sub_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 4 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_sub_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB178_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB178_3 Depth 2 +; RV32IAXCHERI-NEXT: sltu a5, a4, a1 +; RV32IAXCHERI-NEXT: sub a6, a3, a2 +; RV32IAXCHERI-NEXT: sub a5, a6, a5 +; RV32IAXCHERI-NEXT: sub a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB178_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB178_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB178_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB178_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB178_3 +; RV32IAXCHERI-NEXT: 
.LBB178_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB178_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB178_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_sub_i64_acq_rel: @@ -12081,12 +12425,38 @@ define i64 @atomicrmw_sub_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_sub_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_sub_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB179_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB179_3 Depth 2 +; RV32IAXCHERI-NEXT: sltu a5, a4, a1 +; RV32IAXCHERI-NEXT: sub a6, a3, a2 +; RV32IAXCHERI-NEXT: sub a5, a6, a5 +; RV32IAXCHERI-NEXT: sub a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB179_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB179_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB179_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB179_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB179_3 +; RV32IAXCHERI-NEXT: .LBB179_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB179_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB179_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_sub_i64_seq_cst: @@ -12121,12 +12491,36 @@ define i64 @atomicrmw_and_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_and_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_and_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB180_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB180_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a3, a2 +; RV32IAXCHERI-NEXT: and a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB180_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB180_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: 
clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB180_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB180_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB180_3 +; RV32IAXCHERI-NEXT: .LBB180_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB180_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB180_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_and_i64_monotonic: @@ -12160,12 +12554,36 @@ define i64 @atomicrmw_and_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_and_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_and_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB181_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB181_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a3, a2 +; RV32IAXCHERI-NEXT: and a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB181_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB181_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB181_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB181_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB181_3 +; RV32IAXCHERI-NEXT: .LBB181_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB181_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB181_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_and_i64_acquire: @@ -12199,12 +12617,36 @@ define i64 @atomicrmw_and_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_and_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 3 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_and_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB182_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB182_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a3, a2 +; RV32IAXCHERI-NEXT: and a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 
+; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB182_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB182_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB182_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB182_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB182_3 +; RV32IAXCHERI-NEXT: .LBB182_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB182_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB182_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_and_i64_release: @@ -12238,12 +12680,36 @@ define i64 @atomicrmw_and_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_and_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 4 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_and_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB183_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB183_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a3, a2 +; RV32IAXCHERI-NEXT: and a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB183_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB183_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB183_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB183_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB183_3 +; RV32IAXCHERI-NEXT: .LBB183_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB183_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB183_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_and_i64_acq_rel: @@ -12277,12 +12743,36 @@ define i64 @atomicrmw_and_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_and_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_and_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB184_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop 
BB184_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a3, a2 +; RV32IAXCHERI-NEXT: and a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB184_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB184_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB184_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB184_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB184_3 +; RV32IAXCHERI-NEXT: .LBB184_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB184_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB184_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_and_i64_seq_cst: @@ -12316,12 +12806,38 @@ define i64 @atomicrmw_nand_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwin ; ; RV32IAXCHERI-LABEL: atomicrmw_nand_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_nand_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB185_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB185_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a4, a1 +; RV32IAXCHERI-NEXT: and a6, a3, a2 +; RV32IAXCHERI-NEXT: not a6, a6 +; RV32IAXCHERI-NEXT: not a5, a5 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB185_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB185_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB185_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB185_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB185_3 +; RV32IAXCHERI-NEXT: .LBB185_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB185_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB185_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_nand_i64_monotonic: @@ -12362,12 +12878,38 @@ define i64 @atomicrmw_nand_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_nand_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_nand_8 -; 
RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB186_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB186_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a4, a1 +; RV32IAXCHERI-NEXT: and a6, a3, a2 +; RV32IAXCHERI-NEXT: not a6, a6 +; RV32IAXCHERI-NEXT: not a5, a5 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB186_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB186_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB186_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB186_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB186_3 +; RV32IAXCHERI-NEXT: .LBB186_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB186_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB186_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_nand_i64_acquire: @@ -12408,12 +12950,38 @@ define i64 @atomicrmw_nand_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_nand_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 3 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_nand_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB187_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB187_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a4, a1 +; RV32IAXCHERI-NEXT: and a6, a3, a2 +; RV32IAXCHERI-NEXT: not a6, a6 +; RV32IAXCHERI-NEXT: not a5, a5 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB187_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB187_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB187_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB187_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB187_3 +; RV32IAXCHERI-NEXT: .LBB187_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB187_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB187_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_nand_i64_release: @@ 
-12454,12 +13022,38 @@ define i64 @atomicrmw_nand_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_nand_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 4 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_nand_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB188_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB188_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a4, a1 +; RV32IAXCHERI-NEXT: and a6, a3, a2 +; RV32IAXCHERI-NEXT: not a6, a6 +; RV32IAXCHERI-NEXT: not a5, a5 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB188_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB188_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB188_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB188_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB188_3 +; RV32IAXCHERI-NEXT: .LBB188_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB188_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB188_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_nand_i64_acq_rel: @@ -12500,12 +13094,38 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_nand_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_nand_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB189_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB189_3 Depth 2 +; RV32IAXCHERI-NEXT: and a5, a4, a1 +; RV32IAXCHERI-NEXT: and a6, a3, a2 +; RV32IAXCHERI-NEXT: not a6, a6 +; RV32IAXCHERI-NEXT: not a5, a5 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a5 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a6 +; RV32IAXCHERI-NEXT: .LBB189_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB189_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB189_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB189_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB189_3 +; RV32IAXCHERI-NEXT: .LBB189_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in 
Loop: Header=BB189_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB189_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_nand_i64_seq_cst: @@ -12546,12 +13166,36 @@ define i64 @atomicrmw_or_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_or_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_or_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB190_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB190_3 Depth 2 +; RV32IAXCHERI-NEXT: or a5, a3, a2 +; RV32IAXCHERI-NEXT: or a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB190_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB190_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB190_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB190_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB190_3 +; RV32IAXCHERI-NEXT: .LBB190_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB190_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB190_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_or_i64_monotonic: @@ -12585,12 +13229,36 @@ define i64 @atomicrmw_or_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_or_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_or_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB191_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB191_3 Depth 2 +; RV32IAXCHERI-NEXT: or a5, a3, a2 +; RV32IAXCHERI-NEXT: or a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB191_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB191_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB191_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; 
RV32IAXCHERI-NEXT: # in Loop: Header=BB191_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB191_3 +; RV32IAXCHERI-NEXT: .LBB191_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB191_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB191_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_or_i64_acquire: @@ -12624,12 +13292,36 @@ define i64 @atomicrmw_or_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_or_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 3 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_or_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB192_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB192_3 Depth 2 +; RV32IAXCHERI-NEXT: or a5, a3, a2 +; RV32IAXCHERI-NEXT: or a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB192_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB192_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB192_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB192_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB192_3 +; RV32IAXCHERI-NEXT: .LBB192_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB192_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB192_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_or_i64_release: @@ -12663,12 +13355,36 @@ define i64 @atomicrmw_or_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_or_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 4 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_or_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB193_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB193_3 Depth 2 +; RV32IAXCHERI-NEXT: or a5, a3, a2 +; RV32IAXCHERI-NEXT: or a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB193_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB193_1 Depth=1 +; 
RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB193_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB193_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB193_3 +; RV32IAXCHERI-NEXT: .LBB193_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB193_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB193_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_or_i64_acq_rel: @@ -12702,12 +13418,36 @@ define i64 @atomicrmw_or_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_or_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_or_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB194_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB194_3 Depth 2 +; RV32IAXCHERI-NEXT: or a5, a3, a2 +; RV32IAXCHERI-NEXT: or a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB194_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB194_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB194_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB194_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB194_3 +; RV32IAXCHERI-NEXT: .LBB194_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB194_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB194_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_or_i64_seq_cst: @@ -12741,12 +13481,36 @@ define i64 @atomicrmw_xor_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xor_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_xor_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB195_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB195_3 Depth 2 +; RV32IAXCHERI-NEXT: xor a5, a3, a2 +; RV32IAXCHERI-NEXT: xor a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; 
RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB195_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB195_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB195_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB195_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB195_3 +; RV32IAXCHERI-NEXT: .LBB195_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB195_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB195_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xor_i64_monotonic: @@ -12780,12 +13544,36 @@ define i64 @atomicrmw_xor_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_xor_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 2 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_xor_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB196_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB196_3 Depth 2 +; RV32IAXCHERI-NEXT: xor a5, a3, a2 +; RV32IAXCHERI-NEXT: xor a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB196_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB196_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB196_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB196_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB196_3 +; RV32IAXCHERI-NEXT: .LBB196_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB196_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB196_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xor_i64_acquire: @@ -12819,12 +13607,36 @@ define i64 @atomicrmw_xor_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_xor_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 3 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_xor_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB197_1: # %atomicrmw.start 
+; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB197_3 Depth 2 +; RV32IAXCHERI-NEXT: xor a5, a3, a2 +; RV32IAXCHERI-NEXT: xor a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB197_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB197_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB197_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB197_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB197_3 +; RV32IAXCHERI-NEXT: .LBB197_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB197_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB197_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xor_i64_release: @@ -12858,12 +13670,36 @@ define i64 @atomicrmw_xor_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_xor_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 4 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_xor_8 -; RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB198_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB198_3 Depth 2 +; RV32IAXCHERI-NEXT: xor a5, a3, a2 +; RV32IAXCHERI-NEXT: xor a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB198_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB198_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB198_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB198_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB198_3 +; RV32IAXCHERI-NEXT: .LBB198_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB198_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB198_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xor_i64_acq_rel: @@ -12897,12 +13733,36 @@ define i64 @atomicrmw_xor_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_xor_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -16 -; RV32IAXCHERI-NEXT: csc cra, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: li a3, 5 -; RV32IAXCHERI-NEXT: ccall __atomic_fetch_xor_8 -; 
RV32IAXCHERI-NEXT: clc cra, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 16 +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) +; RV32IAXCHERI-NEXT: clw a4, 0(ca0) +; RV32IAXCHERI-NEXT: .LBB199_1: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB199_3 Depth 2 +; RV32IAXCHERI-NEXT: xor a5, a3, a2 +; RV32IAXCHERI-NEXT: xor a6, a4, a1 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a6 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB199_3: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB199_1 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB199_5 +; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB199_3 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB199_3 +; RV32IAXCHERI-NEXT: .LBB199_5: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB199_1 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: beqz a6, .LBB199_1 +; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end +; RV32IAXCHERI-NEXT: mv a0, a4 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xor_i64_seq_cst: @@ -12984,60 +13844,54 @@ define i64 @atomicrmw_max_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_max_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB200_2 ; RV32IAXCHERI-NEXT: .LBB200_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB200_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: li a4, 0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB200_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB200_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB200_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB200_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB200_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB200_8 +; 
RV32IAXCHERI-NEXT: .LBB200_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB200_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB200_7 ; RV32IAXCHERI-NEXT: .LBB200_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB200_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB200_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB200_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB200_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB200_5 ; RV32IAXCHERI-NEXT: .LBB200_4: # in Loop: Header=BB200_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB200_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB200_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB200_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB200_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB200_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB200_1 ; RV32IAXCHERI-NEXT: .LBB200_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_max_i64_monotonic: @@ -13149,60 +14003,54 @@ define i64 @atomicrmw_max_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_max_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB201_2 ; RV32IAXCHERI-NEXT: .LBB201_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB201_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 2 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB201_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, 
cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB201_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB201_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB201_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB201_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB201_8 +; RV32IAXCHERI-NEXT: .LBB201_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB201_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB201_7 ; RV32IAXCHERI-NEXT: .LBB201_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB201_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB201_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB201_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB201_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB201_5 ; RV32IAXCHERI-NEXT: .LBB201_4: # in Loop: Header=BB201_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB201_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB201_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB201_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB201_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB201_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB201_1 ; RV32IAXCHERI-NEXT: .LBB201_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_max_i64_acquire: @@ -13314,60 +14162,54 @@ define i64 @atomicrmw_max_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_max_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB202_2 ; RV32IAXCHERI-NEXT: .LBB202_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in 
Loop: Header=BB202_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 3 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB202_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB202_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB202_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB202_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB202_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB202_8 +; RV32IAXCHERI-NEXT: .LBB202_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB202_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB202_7 ; RV32IAXCHERI-NEXT: .LBB202_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB202_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB202_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB202_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB202_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB202_5 ; RV32IAXCHERI-NEXT: .LBB202_4: # in Loop: Header=BB202_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB202_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB202_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB202_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB202_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB202_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB202_1 ; RV32IAXCHERI-NEXT: .LBB202_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_max_i64_release: @@ -13479,60 +14321,54 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_max_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded 
Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB203_2 ; RV32IAXCHERI-NEXT: .LBB203_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB203_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 4 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB203_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB203_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB203_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB203_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB203_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB203_8 +; RV32IAXCHERI-NEXT: .LBB203_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB203_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB203_7 ; RV32IAXCHERI-NEXT: .LBB203_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB203_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB203_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB203_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB203_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB203_5 ; RV32IAXCHERI-NEXT: .LBB203_4: # in Loop: Header=BB203_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB203_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB203_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB203_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB203_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB203_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB203_1 ; RV32IAXCHERI-NEXT: .LBB203_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: 
cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_max_i64_acq_rel: @@ -13644,60 +14480,54 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_max_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB204_2 ; RV32IAXCHERI-NEXT: .LBB204_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB204_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 5 -; RV32IAXCHERI-NEXT: li a5, 5 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB204_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB204_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB204_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB204_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB204_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB204_8 +; RV32IAXCHERI-NEXT: .LBB204_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB204_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB204_7 ; RV32IAXCHERI-NEXT: .LBB204_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB204_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB204_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB204_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB204_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB204_5 ; RV32IAXCHERI-NEXT: .LBB204_4: # in Loop: Header=BB204_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB204_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB204_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB204_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB204_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB204_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 
+; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB204_1 ; RV32IAXCHERI-NEXT: .LBB204_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_max_i64_seq_cst: @@ -13810,61 +14640,55 @@ define i64 @atomicrmw_min_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_min_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB205_2 ; RV32IAXCHERI-NEXT: .LBB205_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB205_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: li a4, 0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB205_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB205_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB205_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB205_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB205_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB205_8 +; RV32IAXCHERI-NEXT: .LBB205_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB205_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB205_7 ; RV32IAXCHERI-NEXT: .LBB205_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB205_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB205_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB205_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB205_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB205_5 ; RV32IAXCHERI-NEXT: .LBB205_4: # in Loop: Header=BB205_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu 
a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB205_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB205_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB205_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB205_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB205_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB205_1 ; RV32IAXCHERI-NEXT: .LBB205_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_min_i64_monotonic: @@ -13977,61 +14801,55 @@ define i64 @atomicrmw_min_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_min_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB206_2 ; RV32IAXCHERI-NEXT: .LBB206_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB206_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 2 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB206_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB206_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB206_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB206_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB206_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB206_8 +; RV32IAXCHERI-NEXT: .LBB206_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB206_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, 
.LBB206_7 ; RV32IAXCHERI-NEXT: .LBB206_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB206_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB206_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB206_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB206_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB206_5 ; RV32IAXCHERI-NEXT: .LBB206_4: # in Loop: Header=BB206_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB206_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB206_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB206_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB206_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB206_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB206_1 ; RV32IAXCHERI-NEXT: .LBB206_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_min_i64_acquire: @@ -14144,61 +14962,55 @@ define i64 @atomicrmw_min_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_min_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB207_2 ; RV32IAXCHERI-NEXT: .LBB207_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB207_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 3 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB207_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB207_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB207_2 Depth=1 +; 
RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB207_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB207_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB207_8 +; RV32IAXCHERI-NEXT: .LBB207_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB207_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB207_7 ; RV32IAXCHERI-NEXT: .LBB207_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB207_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB207_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB207_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB207_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB207_5 ; RV32IAXCHERI-NEXT: .LBB207_4: # in Loop: Header=BB207_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB207_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB207_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB207_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB207_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB207_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB207_1 ; RV32IAXCHERI-NEXT: .LBB207_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_min_i64_release: @@ -14311,61 +15123,55 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_min_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB208_2 ; RV32IAXCHERI-NEXT: .LBB208_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB208_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw 
a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 4 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB208_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB208_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB208_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB208_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB208_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB208_8 +; RV32IAXCHERI-NEXT: .LBB208_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB208_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB208_7 ; RV32IAXCHERI-NEXT: .LBB208_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB208_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB208_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB208_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB208_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB208_5 ; RV32IAXCHERI-NEXT: .LBB208_4: # in Loop: Header=BB208_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB208_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB208_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB208_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB208_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB208_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB208_1 ; RV32IAXCHERI-NEXT: .LBB208_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_min_i64_acq_rel: @@ -14478,61 +15284,55 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind { ; ; RV32IAXCHERI-LABEL: atomicrmw_min_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; 
RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB209_2 ; RV32IAXCHERI-NEXT: .LBB209_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB209_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 5 -; RV32IAXCHERI-NEXT: li a5, 5 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB209_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB209_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB209_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB209_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB209_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB209_8 +; RV32IAXCHERI-NEXT: .LBB209_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB209_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB209_7 ; RV32IAXCHERI-NEXT: .LBB209_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB209_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB209_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB209_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB209_2 Depth=1 -; RV32IAXCHERI-NEXT: slt a0, s1, a5 +; RV32IAXCHERI-NEXT: slt a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB209_5 ; RV32IAXCHERI-NEXT: .LBB209_4: # in Loop: Header=BB209_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB209_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB209_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB209_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB209_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB209_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB209_1 ; RV32IAXCHERI-NEXT: .LBB209_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; 
RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_min_i64_seq_cst: @@ -14644,60 +15444,54 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwin ; ; RV32IAXCHERI-LABEL: atomicrmw_umax_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB210_2 ; RV32IAXCHERI-NEXT: .LBB210_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB210_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: li a4, 0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB210_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB210_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB210_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB210_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB210_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB210_8 +; RV32IAXCHERI-NEXT: .LBB210_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB210_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB210_7 ; RV32IAXCHERI-NEXT: .LBB210_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB210_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB210_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB210_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB210_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB210_5 ; RV32IAXCHERI-NEXT: .LBB210_4: # in Loop: Header=BB210_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB210_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB210_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB210_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB210_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: 
Header=BB210_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB210_1 ; RV32IAXCHERI-NEXT: .LBB210_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_umax_i64_monotonic: @@ -14809,60 +15603,54 @@ define i64 @atomicrmw_umax_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_umax_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB211_2 ; RV32IAXCHERI-NEXT: .LBB211_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB211_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 2 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB211_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB211_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB211_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB211_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB211_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB211_8 +; RV32IAXCHERI-NEXT: .LBB211_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB211_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB211_7 ; RV32IAXCHERI-NEXT: .LBB211_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB211_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB211_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB211_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB211_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a6, a2, a3 ; RV32IAXCHERI-NEXT: j 
.LBB211_5 ; RV32IAXCHERI-NEXT: .LBB211_4: # in Loop: Header=BB211_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB211_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB211_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB211_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB211_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB211_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB211_1 ; RV32IAXCHERI-NEXT: .LBB211_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_umax_i64_acquire: @@ -14974,60 +15762,54 @@ define i64 @atomicrmw_umax_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_umax_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB212_2 ; RV32IAXCHERI-NEXT: .LBB212_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB212_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 3 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB212_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB212_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB212_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB212_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB212_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB212_8 +; RV32IAXCHERI-NEXT: .LBB212_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB212_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; 
RV32IAXCHERI-NEXT: bnez a6, .LBB212_7 ; RV32IAXCHERI-NEXT: .LBB212_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB212_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB212_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB212_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB212_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB212_5 ; RV32IAXCHERI-NEXT: .LBB212_4: # in Loop: Header=BB212_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB212_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB212_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB212_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB212_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB212_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB212_1 ; RV32IAXCHERI-NEXT: .LBB212_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_umax_i64_release: @@ -15139,60 +15921,54 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_umax_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB213_2 ; RV32IAXCHERI-NEXT: .LBB213_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB213_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 4 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB213_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB213_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB213_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; 
RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB213_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB213_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB213_8 +; RV32IAXCHERI-NEXT: .LBB213_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB213_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB213_7 ; RV32IAXCHERI-NEXT: .LBB213_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB213_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB213_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB213_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB213_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB213_5 ; RV32IAXCHERI-NEXT: .LBB213_4: # in Loop: Header=BB213_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB213_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB213_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB213_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB213_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB213_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB213_1 ; RV32IAXCHERI-NEXT: .LBB213_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_umax_i64_acq_rel: @@ -15304,60 +16080,54 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_umax_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB214_2 ; RV32IAXCHERI-NEXT: .LBB214_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB214_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 5 -; RV32IAXCHERI-NEXT: li a5, 5 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; 
RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB214_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB214_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB214_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB214_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB214_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB214_8 +; RV32IAXCHERI-NEXT: .LBB214_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB214_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB214_7 ; RV32IAXCHERI-NEXT: .LBB214_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB214_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB214_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB214_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB214_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a6, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB214_5 ; RV32IAXCHERI-NEXT: .LBB214_4: # in Loop: Header=BB214_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a6, a1, a4 ; RV32IAXCHERI-NEXT: .LBB214_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB214_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB214_1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB214_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB214_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB214_1 ; RV32IAXCHERI-NEXT: .LBB214_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_umax_i64_seq_cst: @@ -15470,61 +16240,55 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwin ; ; RV32IAXCHERI-LABEL: atomicrmw_umin_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 
4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB215_2 ; RV32IAXCHERI-NEXT: .LBB215_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB215_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: li a4, 0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB215_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB215_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB215_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB215_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB215_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB215_8 +; RV32IAXCHERI-NEXT: .LBB215_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB215_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB215_7 ; RV32IAXCHERI-NEXT: .LBB215_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB215_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB215_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB215_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB215_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB215_5 ; RV32IAXCHERI-NEXT: .LBB215_4: # in Loop: Header=BB215_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB215_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB215_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB215_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB215_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB215_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB215_1 ; RV32IAXCHERI-NEXT: .LBB215_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: 
atomicrmw_umin_i64_monotonic: @@ -15637,61 +16401,55 @@ define i64 @atomicrmw_umin_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_umin_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB216_2 ; RV32IAXCHERI-NEXT: .LBB216_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB216_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 2 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB216_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB216_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB216_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB216_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB216_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB216_8 +; RV32IAXCHERI-NEXT: .LBB216_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB216_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB216_7 ; RV32IAXCHERI-NEXT: .LBB216_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB216_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB216_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB216_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB216_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB216_5 ; RV32IAXCHERI-NEXT: .LBB216_4: # in Loop: Header=BB216_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB216_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB216_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB216_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB216_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB216_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, 
a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB216_1 ; RV32IAXCHERI-NEXT: .LBB216_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_umin_i64_acquire: @@ -15804,61 +16562,55 @@ define i64 @atomicrmw_umin_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_umin_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB217_2 ; RV32IAXCHERI-NEXT: .LBB217_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB217_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 3 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: li a5, 0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB217_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB217_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB217_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB217_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB217_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB217_8 +; RV32IAXCHERI-NEXT: .LBB217_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB217_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB217_7 ; RV32IAXCHERI-NEXT: .LBB217_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB217_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB217_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB217_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB217_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB217_5 ; RV32IAXCHERI-NEXT: .LBB217_4: # in Loop: Header=BB217_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; 
RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB217_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB217_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB217_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB217_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB217_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB217_1 ; RV32IAXCHERI-NEXT: .LBB217_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_umin_i64_release: @@ -15971,61 +16723,55 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_umin_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB218_2 ; RV32IAXCHERI-NEXT: .LBB218_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB218_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 4 -; RV32IAXCHERI-NEXT: li a5, 2 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB218_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB218_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB218_2 Depth=1 +; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 +; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: beqz a6, .LBB218_10 +; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB218_8 Depth=2 +; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) +; RV32IAXCHERI-NEXT: bnez a6, .LBB218_8 +; RV32IAXCHERI-NEXT: .LBB218_10: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # in Loop: Header=BB218_2 Depth=1 +; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 +; RV32IAXCHERI-NEXT: mv a4, a5 +; RV32IAXCHERI-NEXT: cgethigh a3, ca5 +; RV32IAXCHERI-NEXT: bnez a6, .LBB218_7 ; 
RV32IAXCHERI-NEXT: .LBB218_2: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: beq a5, s1, .LBB218_4 +; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 +; RV32IAXCHERI-NEXT: # Child Loop BB218_8 Depth 2 +; RV32IAXCHERI-NEXT: beq a3, a2, .LBB218_4 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB218_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s1, a5 +; RV32IAXCHERI-NEXT: sltu a5, a2, a3 ; RV32IAXCHERI-NEXT: j .LBB218_5 ; RV32IAXCHERI-NEXT: .LBB218_4: # in Loop: Header=BB218_2 Depth=1 -; RV32IAXCHERI-NEXT: sltu a0, s2, a4 +; RV32IAXCHERI-NEXT: sltu a5, a1, a4 ; RV32IAXCHERI-NEXT: .LBB218_5: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB218_2 Depth=1 -; RV32IAXCHERI-NEXT: xori a0, a0, 1 -; RV32IAXCHERI-NEXT: mv a2, a4 -; RV32IAXCHERI-NEXT: mv a3, a5 -; RV32IAXCHERI-NEXT: bnez a0, .LBB218_1 +; RV32IAXCHERI-NEXT: xori a6, a5, 1 +; RV32IAXCHERI-NEXT: mv a5, a3 +; RV32IAXCHERI-NEXT: mv a7, a4 +; RV32IAXCHERI-NEXT: bnez a6, .LBB218_1 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB218_2 Depth=1 -; RV32IAXCHERI-NEXT: mv a2, s2 -; RV32IAXCHERI-NEXT: mv a3, s1 +; RV32IAXCHERI-NEXT: mv a5, a2 +; RV32IAXCHERI-NEXT: mv a7, a1 ; RV32IAXCHERI-NEXT: j .LBB218_1 ; RV32IAXCHERI-NEXT: .LBB218_7: # %atomicrmw.end ; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a5 -; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload -; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48 +; RV32IAXCHERI-NEXT: mv a1, a3 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_umin_i64_acq_rel: @@ -16138,61 +16884,55 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_umin_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: cincoffset csp, csp, -48 -; RV32IAXCHERI-NEXT: csc cra, 40(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs0, 32(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs1, 24(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs2, 16(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: csc cs3, 8(csp) # 8-byte Folded Spill -; RV32IAXCHERI-NEXT: cmove cs3, ca0 -; RV32IAXCHERI-NEXT: clw a5, 4(ca0) +; RV32IAXCHERI-NEXT: clw a3, 4(ca0) ; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: mv s1, a2 -; RV32IAXCHERI-NEXT: mv s2, a1 -; RV32IAXCHERI-NEXT: cincoffset ca0, csp, 0 -; RV32IAXCHERI-NEXT: csetbounds cs0, ca0, 8 ; RV32IAXCHERI-NEXT: j .LBB219_2 ; RV32IAXCHERI-NEXT: .LBB219_1: # %atomicrmw.start ; RV32IAXCHERI-NEXT: # in Loop: Header=BB219_2 Depth=1 -; RV32IAXCHERI-NEXT: csw a4, 0(csp) -; RV32IAXCHERI-NEXT: csw a5, 4(csp) -; RV32IAXCHERI-NEXT: li a4, 5 -; RV32IAXCHERI-NEXT: li a5, 5 -; RV32IAXCHERI-NEXT: cmove ca0, cs3 -; RV32IAXCHERI-NEXT: cmove ca1, cs0 -; RV32IAXCHERI-NEXT: ccall __atomic_compare_exchange_8 -; RV32IAXCHERI-NEXT: clw a5, 4(csp) -; RV32IAXCHERI-NEXT: clw a4, 0(csp) -; RV32IAXCHERI-NEXT: bnez a0, .LBB219_7 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 +; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 +; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a7 +; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a5 +; RV32IAXCHERI-NEXT: .LBB219_8: # %atomicrmw.start +; RV32IAXCHERI-NEXT: # Parent Loop BB219_2 Depth=1 +; RV32IAXCHERI-NEXT: # => 
This Inner Loop Header: Depth=2
+; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0)
+; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3
+; RV32IAXCHERI-NEXT: beqz a6, .LBB219_10
+; RV32IAXCHERI-NEXT: # %bb.9: # %atomicrmw.start
+; RV32IAXCHERI-NEXT: # in Loop: Header=BB219_8 Depth=2
+; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0)
+; RV32IAXCHERI-NEXT: bnez a6, .LBB219_8
+; RV32IAXCHERI-NEXT: .LBB219_10: # %atomicrmw.start
+; RV32IAXCHERI-NEXT: # in Loop: Header=BB219_2 Depth=1
+; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3
+; RV32IAXCHERI-NEXT: mv a4, a5
+; RV32IAXCHERI-NEXT: cgethigh a3, ca5
+; RV32IAXCHERI-NEXT: bnez a6, .LBB219_7
 ; RV32IAXCHERI-NEXT: .LBB219_2: # %atomicrmw.start
-; RV32IAXCHERI-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32IAXCHERI-NEXT: beq a5, s1, .LBB219_4
+; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1
+; RV32IAXCHERI-NEXT: # Child Loop BB219_8 Depth 2
+; RV32IAXCHERI-NEXT: beq a3, a2, .LBB219_4
 ; RV32IAXCHERI-NEXT: # %bb.3: # %atomicrmw.start
 ; RV32IAXCHERI-NEXT: # in Loop: Header=BB219_2 Depth=1
-; RV32IAXCHERI-NEXT: sltu a0, s1, a5
+; RV32IAXCHERI-NEXT: sltu a5, a2, a3
 ; RV32IAXCHERI-NEXT: j .LBB219_5
 ; RV32IAXCHERI-NEXT: .LBB219_4: # in Loop: Header=BB219_2 Depth=1
-; RV32IAXCHERI-NEXT: sltu a0, s2, a4
+; RV32IAXCHERI-NEXT: sltu a5, a1, a4
 ; RV32IAXCHERI-NEXT: .LBB219_5: # %atomicrmw.start
 ; RV32IAXCHERI-NEXT: # in Loop: Header=BB219_2 Depth=1
-; RV32IAXCHERI-NEXT: xori a0, a0, 1
-; RV32IAXCHERI-NEXT: mv a2, a4
-; RV32IAXCHERI-NEXT: mv a3, a5
-; RV32IAXCHERI-NEXT: bnez a0, .LBB219_1
+; RV32IAXCHERI-NEXT: xori a6, a5, 1
+; RV32IAXCHERI-NEXT: mv a5, a3
+; RV32IAXCHERI-NEXT: mv a7, a4
+; RV32IAXCHERI-NEXT: bnez a6, .LBB219_1
 ; RV32IAXCHERI-NEXT: # %bb.6: # %atomicrmw.start
 ; RV32IAXCHERI-NEXT: # in Loop: Header=BB219_2 Depth=1
-; RV32IAXCHERI-NEXT: mv a2, s2
-; RV32IAXCHERI-NEXT: mv a3, s1
+; RV32IAXCHERI-NEXT: mv a5, a2
+; RV32IAXCHERI-NEXT: mv a7, a1
 ; RV32IAXCHERI-NEXT: j .LBB219_1
 ; RV32IAXCHERI-NEXT: .LBB219_7: # %atomicrmw.end
 ; RV32IAXCHERI-NEXT: mv a0, a4
-; RV32IAXCHERI-NEXT: mv a1, a5
-; RV32IAXCHERI-NEXT: clc cra, 40(csp) # 8-byte Folded Reload
-; RV32IAXCHERI-NEXT: clc cs0, 32(csp) # 8-byte Folded Reload
-; RV32IAXCHERI-NEXT: clc cs1, 24(csp) # 8-byte Folded Reload
-; RV32IAXCHERI-NEXT: clc cs2, 16(csp) # 8-byte Folded Reload
-; RV32IAXCHERI-NEXT: clc cs3, 8(csp) # 8-byte Folded Reload
-; RV32IAXCHERI-NEXT: cincoffset csp, csp, 48
+; RV32IAXCHERI-NEXT: mv a1, a3
 ; RV32IAXCHERI-NEXT: cret
 ;
 ; RV64IXCHERI-LABEL: atomicrmw_umin_i64_seq_cst:

From 7ecb141459d258023105da7c374bc126c77922fe Mon Sep 17 00:00:00 2001
From: Alex Richardson
Date: Thu, 21 Sep 2023 11:40:25 -0700
Subject: [PATCH 11/18] [CHERI-RISC-V] Use camoswap.c for 2*XLen integer Xchg operations

This is a lot more efficient than a CMPXCHG loop.
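To illustrate the new expansion: AtomicExpandPass now packs the 2*XLen
integer into a capability-sized value and issues a single
capability-width atomicrmw xchg instead of a load plus cmpxchg loop.
The sketch below shows roughly what this looks like for RV32 purecap;
the function name is invented for illustration and the value names are
whatever the builder assigns, but the operations mirror the updated
PURECAP-IR checks in this patch:

  declare ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200), i32)
  declare i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200))
  declare i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200))

  define i64 @xchg_sketch(ptr addrspace(200) %ptr, i64 %val) nounwind {
    ; Pack the i64 into a capability: the address holds the low 32 bits
    ; and the high 32 bits are stored via the set-high intrinsic.
    %addr = getelementptr i8, ptr addrspace(200) null, i64 %val
    %shifted = lshr i64 %val, 32
    %hi = trunc i64 %shifted to i32
    %cap = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) %addr, i32 %hi)
    ; One capability-width swap; this selects to a single camoswap.c.
    %old = atomicrmw xchg ptr addrspace(200) %ptr, ptr addrspace(200) %cap seq_cst, align 8
    ; Unpack the returned capability back into an i64.
    %lo32 = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) %old)
    %hi32 = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) %old)
    %lo64 = zext i32 %lo32 to i64
    %hi64 = zext i32 %hi32 to i64
    %hi64shifted = shl i64 %hi64, 32
    %result = or i64 %lo64, %hi64shifted
    ret i64 %result
  }

On RV32 this selects to cincoffset/csethigh to build the new value, one
camoswap.c.aqrl, and cgethigh to split the old value, as the test
updates below show.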
--- llvm/lib/CodeGen/AtomicExpandPass.cpp | 26 +++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 + .../RISCV32/atomic-cap-size-int.ll | 106 +++-------- .../RISCV64/atomic-cap-size-int.ll | 106 +++-------- llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll | 165 +++--------------- 5 files changed, 97 insertions(+), 308 deletions(-) diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp index 524402369c8e..a6a545bc2cf0 100644 --- a/llvm/lib/CodeGen/AtomicExpandPass.cpp +++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp @@ -95,6 +95,7 @@ class AtomicExpand : public FunctionPass { void expandPartwordAtomicRMW( AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind); AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI); + AtomicRMWInst *convertAtomicXchgToCapabilityType(AtomicRMWInst *SI); bool expandPartwordCmpXchg(AtomicCmpXchgInst *I); void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI); void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI); @@ -603,6 +604,28 @@ AtomicExpand::convertAtomicStoreToCapabilityType(llvm::StoreInst *SI) { return NewSI; } +AtomicRMWInst * +AtomicExpand::convertAtomicXchgToCapabilityType(llvm::AtomicRMWInst *AI) { + IRBuilder<> Builder(AI); + assert(AI->getOperation() == AtomicRMWInst::Xchg); + Type *CapTy; + Value *NewAddr = getCapAddr(AI->getPointerOperand(), &CapTy, Builder, TLI); + const DataLayout &DL = AI->getModule()->getDataLayout(); + Value *NewNewVal = + integerToSameSizeCapability(AI->getValOperand(), Builder, CapTy, DL); + + auto *NewAI = Builder.CreateAtomicRMW(AI->getOperation(), NewAddr, NewNewVal, + AI->getAlign(), AI->getOrdering(), + AI->getSyncScopeID()); + NewAI->setVolatile(AI->isVolatile()); + LLVM_DEBUG(dbgs() << "Replaced " << *AI << " with " << *NewAI << "\n"); + + Value *Result = integerFromSameSizeCapability(NewAI, Builder, DL); + AI->replaceAllUsesWith(Result); + AI->eraseFromParent(); + return NewAI; +} + AtomicCmpXchgInst * AtomicExpand::convertCmpXchgToCapabilityType(llvm::AtomicCmpXchgInst *CI) { IRBuilder<> Builder(CI); @@ -742,6 +765,9 @@ bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) { TLI->emitBitTestAtomicRMWIntrinsic(AI); return true; } + case TargetLoweringBase::AtomicExpansionKind::CheriCapability: + convertAtomicXchgToCapabilityType(AI); + return true; case TargetLoweringBase::AtomicExpansionKind::NotAtomic: return lowerAtomicRMWInst(AI); default: diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 0da520d5c385..27216ada7cce 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -12763,6 +12763,8 @@ RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { return AtomicExpansionKind::MaskedIntrinsic; if (Subtarget.hasCheri() && Size == Subtarget.typeForCapabilities().getSizeInBits()) { + if (AI->getOperation() == AtomicRMWInst::Xchg) + return AtomicExpansionKind::CheriCapability; return AtomicExpansionKind::CmpXChg; } return AtomicExpansionKind::None; diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll index 1ba10fde024e..653bec3808d4 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll @@ -178,34 +178,11 @@ define i64 @load(ptr addrspace(200) %ptr) nounwind { define i64 @atomic_xchg(ptr addrspace(200) %ptr, i64 %val) nounwind { ; PURECAP-ATOMICS-LABEL: 
atomic_xchg: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: clw a3, 4(ca0) -; PURECAP-ATOMICS-NEXT: clw a4, 0(ca0) -; PURECAP-ATOMICS-NEXT: .LBB2_1: # %atomicrmw.start -; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 -; PURECAP-ATOMICS-NEXT: # Child Loop BB2_3 Depth 2 -; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 -; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 -; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a1 -; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a2 -; PURECAP-ATOMICS-NEXT: .LBB2_3: # %atomicrmw.start -; PURECAP-ATOMICS-NEXT: # Parent Loop BB2_1 Depth=1 -; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 -; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) -; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 -; PURECAP-ATOMICS-NEXT: beqz a6, .LBB2_5 -; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start -; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB2_3 Depth=2 -; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a6, .LBB2_3 -; PURECAP-ATOMICS-NEXT: .LBB2_5: # %atomicrmw.start -; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB2_1 Depth=1 -; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 -; PURECAP-ATOMICS-NEXT: mv a4, a5 -; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 -; PURECAP-ATOMICS-NEXT: beqz a6, .LBB2_1 -; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end -; PURECAP-ATOMICS-NEXT: mv a0, a4 -; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca1, cnull, a1 +; PURECAP-ATOMICS-NEXT: csethigh ca1, ca1, a2 +; PURECAP-ATOMICS-NEXT: camoswap.c.aqrl ca1, ca1, (ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cgethigh a1, ca1 ; PURECAP-ATOMICS-NEXT: cret ; ; PURECAP-LIBCALLS-LABEL: atomic_xchg: @@ -220,34 +197,11 @@ define i64 @atomic_xchg(ptr addrspace(200) %ptr, i64 %val) nounwind { ; ; HYBRID-ATOMICS-LABEL: atomic_xchg: ; HYBRID-ATOMICS: # %bb.0: -; HYBRID-ATOMICS-NEXT: lw a3, 4(a0) -; HYBRID-ATOMICS-NEXT: lw a4, 0(a0) -; HYBRID-ATOMICS-NEXT: .LBB2_1: # %atomicrmw.start -; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 -; HYBRID-ATOMICS-NEXT: # Child Loop BB2_3 Depth 2 -; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 -; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 -; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a1 -; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a2 -; HYBRID-ATOMICS-NEXT: .LBB2_3: # %atomicrmw.start -; HYBRID-ATOMICS-NEXT: # Parent Loop BB2_1 Depth=1 -; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 -; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) -; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 -; HYBRID-ATOMICS-NEXT: beqz a6, .LBB2_5 -; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start -; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB2_3 Depth=2 -; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) -; HYBRID-ATOMICS-NEXT: bnez a6, .LBB2_3 -; HYBRID-ATOMICS-NEXT: .LBB2_5: # %atomicrmw.start -; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB2_1 Depth=1 -; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 -; HYBRID-ATOMICS-NEXT: mv a4, a5 -; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 -; HYBRID-ATOMICS-NEXT: beqz a6, .LBB2_1 -; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end -; HYBRID-ATOMICS-NEXT: mv a0, a4 -; HYBRID-ATOMICS-NEXT: mv a1, a3 +; HYBRID-ATOMICS-NEXT: cincoffset ca1, cnull, a1 +; HYBRID-ATOMICS-NEXT: csethigh ca1, ca1, a2 +; HYBRID-ATOMICS-NEXT: amoswap.c.aqrl ca1, ca1, (a0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: cgethigh a1, ca1 ; HYBRID-ATOMICS-NEXT: ret ; ; HYBRID-LIBCALLS-LABEL: atomic_xchg: @@ -271,34 +225,18 @@ define i64 @atomic_xchg(ptr addrspace(200) %ptr, i64 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; 
PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xchg ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(200) [[PTR]], align 8 -; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] -; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[LOADED]] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i64 [[LOADED]], 32 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 -; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP2]], i32 [[TMP4]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[VAL]] -; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i64 [[VAL]], 32 -; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32 -; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP6]], i32 [[TMP8]]) -; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 8 -; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 -; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 -; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP11]]) -; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP11]]) -; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i32 [[TMP13]] to i64 -; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 -; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 32 -; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i64 [[TMP15]], [[TMP17]] -; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP18]], 0 -; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i64, i1 } [[TMP19]], i1 [[TMP12]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP20]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP20]], 0 -; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] -; PURECAP-IR: atomicrmw.end: -; PURECAP-IR-NEXT: ret i64 [[NEWLOADED]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = lshr i64 [[VAL]], 32 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP1]], i32 [[TMP3]]) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = atomicrmw xchg ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP4]] seq_cst, align 8 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = shl i64 [[TMP9]], 32 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = or i64 [[TMP8]], [[TMP10]] +; PURECAP-IR-NEXT: ret i64 [[TMP11]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xchg ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll 
b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll index e7e4f29a53c6..28b8e6faeec5 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll @@ -178,34 +178,11 @@ define i128 @load(ptr addrspace(200) %ptr) nounwind { define i128 @atomic_xchg(ptr addrspace(200) %ptr, i128 %val) nounwind { ; PURECAP-ATOMICS-LABEL: atomic_xchg: ; PURECAP-ATOMICS: # %bb.0: -; PURECAP-ATOMICS-NEXT: cld a3, 8(ca0) -; PURECAP-ATOMICS-NEXT: cld a4, 0(ca0) -; PURECAP-ATOMICS-NEXT: .LBB2_1: # %atomicrmw.start -; PURECAP-ATOMICS-NEXT: # =>This Loop Header: Depth=1 -; PURECAP-ATOMICS-NEXT: # Child Loop BB2_3 Depth 2 -; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a4 -; PURECAP-ATOMICS-NEXT: csethigh ca3, ca4, a3 -; PURECAP-ATOMICS-NEXT: cincoffset ca4, cnull, a1 -; PURECAP-ATOMICS-NEXT: csethigh ca4, ca4, a2 -; PURECAP-ATOMICS-NEXT: .LBB2_3: # %atomicrmw.start -; PURECAP-ATOMICS-NEXT: # Parent Loop BB2_1 Depth=1 -; PURECAP-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 -; PURECAP-ATOMICS-NEXT: clr.c.aqrl ca5, (ca0) -; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 -; PURECAP-ATOMICS-NEXT: beqz a6, .LBB2_5 -; PURECAP-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start -; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB2_3 Depth=2 -; PURECAP-ATOMICS-NEXT: csc.c.aqrl a6, ca4, (ca0) -; PURECAP-ATOMICS-NEXT: bnez a6, .LBB2_3 -; PURECAP-ATOMICS-NEXT: .LBB2_5: # %atomicrmw.start -; PURECAP-ATOMICS-NEXT: # in Loop: Header=BB2_1 Depth=1 -; PURECAP-ATOMICS-NEXT: cseqx a6, ca5, ca3 -; PURECAP-ATOMICS-NEXT: mv a4, a5 -; PURECAP-ATOMICS-NEXT: cgethigh a3, ca5 -; PURECAP-ATOMICS-NEXT: beqz a6, .LBB2_1 -; PURECAP-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end -; PURECAP-ATOMICS-NEXT: mv a0, a4 -; PURECAP-ATOMICS-NEXT: mv a1, a3 +; PURECAP-ATOMICS-NEXT: cincoffset ca1, cnull, a1 +; PURECAP-ATOMICS-NEXT: csethigh ca1, ca1, a2 +; PURECAP-ATOMICS-NEXT: camoswap.c.aqrl ca1, ca1, (ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cgethigh a1, ca1 ; PURECAP-ATOMICS-NEXT: cret ; ; PURECAP-LIBCALLS-LABEL: atomic_xchg: @@ -220,34 +197,11 @@ define i128 @atomic_xchg(ptr addrspace(200) %ptr, i128 %val) nounwind { ; ; HYBRID-ATOMICS-LABEL: atomic_xchg: ; HYBRID-ATOMICS: # %bb.0: -; HYBRID-ATOMICS-NEXT: ld a3, 8(a0) -; HYBRID-ATOMICS-NEXT: ld a4, 0(a0) -; HYBRID-ATOMICS-NEXT: .LBB2_1: # %atomicrmw.start -; HYBRID-ATOMICS-NEXT: # =>This Loop Header: Depth=1 -; HYBRID-ATOMICS-NEXT: # Child Loop BB2_3 Depth 2 -; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a4 -; HYBRID-ATOMICS-NEXT: csethigh ca3, ca4, a3 -; HYBRID-ATOMICS-NEXT: cincoffset ca4, cnull, a1 -; HYBRID-ATOMICS-NEXT: csethigh ca4, ca4, a2 -; HYBRID-ATOMICS-NEXT: .LBB2_3: # %atomicrmw.start -; HYBRID-ATOMICS-NEXT: # Parent Loop BB2_1 Depth=1 -; HYBRID-ATOMICS-NEXT: # => This Inner Loop Header: Depth=2 -; HYBRID-ATOMICS-NEXT: lr.c.aqrl ca5, (a0) -; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 -; HYBRID-ATOMICS-NEXT: beqz a6, .LBB2_5 -; HYBRID-ATOMICS-NEXT: # %bb.4: # %atomicrmw.start -; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB2_3 Depth=2 -; HYBRID-ATOMICS-NEXT: sc.c.aqrl a6, ca4, (a0) -; HYBRID-ATOMICS-NEXT: bnez a6, .LBB2_3 -; HYBRID-ATOMICS-NEXT: .LBB2_5: # %atomicrmw.start -; HYBRID-ATOMICS-NEXT: # in Loop: Header=BB2_1 Depth=1 -; HYBRID-ATOMICS-NEXT: cseqx a6, ca5, ca3 -; HYBRID-ATOMICS-NEXT: mv a4, a5 -; HYBRID-ATOMICS-NEXT: cgethigh a3, ca5 -; HYBRID-ATOMICS-NEXT: beqz a6, .LBB2_1 -; HYBRID-ATOMICS-NEXT: # %bb.2: # %atomicrmw.end -; HYBRID-ATOMICS-NEXT: mv a0, a4 -; HYBRID-ATOMICS-NEXT: mv a1, a3 +; 
HYBRID-ATOMICS-NEXT: cincoffset ca1, cnull, a1 +; HYBRID-ATOMICS-NEXT: csethigh ca1, ca1, a2 +; HYBRID-ATOMICS-NEXT: amoswap.c.aqrl ca1, ca1, (a0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: cgethigh a1, ca1 ; HYBRID-ATOMICS-NEXT: ret ; ; HYBRID-LIBCALLS-LABEL: atomic_xchg: @@ -271,34 +225,18 @@ define i128 @atomic_xchg(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-CAP-PTR-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@atomic_xchg ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { -; PURECAP-IR-NEXT: [[TMP1:%.*]] = load i128, ptr addrspace(200) [[PTR]], align 16 -; PURECAP-IR-NEXT: br label [[ATOMICRMW_START:%.*]] -; PURECAP-IR: atomicrmw.start: -; PURECAP-IR-NEXT: [[LOADED:%.*]] = phi i128 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ] -; PURECAP-IR-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[LOADED]] -; PURECAP-IR-NEXT: [[TMP3:%.*]] = lshr i128 [[LOADED]], 64 -; PURECAP-IR-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 -; PURECAP-IR-NEXT: [[TMP5:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP2]], i64 [[TMP4]]) -; PURECAP-IR-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[VAL]] -; PURECAP-IR-NEXT: [[TMP7:%.*]] = lshr i128 [[VAL]], 64 -; PURECAP-IR-NEXT: [[TMP8:%.*]] = trunc i128 [[TMP7]] to i64 -; PURECAP-IR-NEXT: [[TMP9:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP6]], i64 [[TMP8]]) -; PURECAP-IR-NEXT: [[TMP10:%.*]] = cmpxchg exact ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP5]], ptr addrspace(200) [[TMP9]] seq_cst seq_cst, align 16 -; PURECAP-IR-NEXT: [[TMP11:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 0 -; PURECAP-IR-NEXT: [[TMP12:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP10]], 1 -; PURECAP-IR-NEXT: [[TMP13:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP11]]) -; PURECAP-IR-NEXT: [[TMP14:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP11]]) -; PURECAP-IR-NEXT: [[TMP15:%.*]] = zext i64 [[TMP13]] to i128 -; PURECAP-IR-NEXT: [[TMP16:%.*]] = zext i64 [[TMP14]] to i128 -; PURECAP-IR-NEXT: [[TMP17:%.*]] = shl i128 [[TMP16]], 64 -; PURECAP-IR-NEXT: [[TMP18:%.*]] = or i128 [[TMP15]], [[TMP17]] -; PURECAP-IR-NEXT: [[TMP19:%.*]] = insertvalue { i128, i1 } undef, i128 [[TMP18]], 0 -; PURECAP-IR-NEXT: [[TMP20:%.*]] = insertvalue { i128, i1 } [[TMP19]], i1 [[TMP12]], 1 -; PURECAP-IR-NEXT: [[SUCCESS:%.*]] = extractvalue { i128, i1 } [[TMP20]], 1 -; PURECAP-IR-NEXT: [[NEWLOADED]] = extractvalue { i128, i1 } [[TMP20]], 0 -; PURECAP-IR-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]] -; PURECAP-IR: atomicrmw.end: -; PURECAP-IR-NEXT: ret i128 [[NEWLOADED]] +; PURECAP-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[VAL]] +; PURECAP-IR-NEXT: [[TMP2:%.*]] = lshr i128 [[VAL]], 64 +; PURECAP-IR-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i64 +; PURECAP-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP1]], i64 [[TMP3]]) +; PURECAP-IR-NEXT: [[TMP5:%.*]] = atomicrmw xchg ptr addrspace(200) [[PTR]], ptr addrspace(200) [[TMP4]] seq_cst, align 16 +; PURECAP-IR-NEXT: [[TMP6:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP7:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP5]]) +; PURECAP-IR-NEXT: [[TMP8:%.*]] = zext i64 [[TMP6]] to 
i128 +; PURECAP-IR-NEXT: [[TMP9:%.*]] = zext i64 [[TMP7]] to i128 +; PURECAP-IR-NEXT: [[TMP10:%.*]] = shl i128 [[TMP9]], 64 +; PURECAP-IR-NEXT: [[TMP11:%.*]] = or i128 [[TMP8]], [[TMP10]] +; PURECAP-IR-NEXT: ret i128 [[TMP11]] ; ; HYBRID-IR-LABEL: define {{[^@]+}}@atomic_xchg ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll index 1e04f6f3b825..2b21d40b9cf3 100644 --- a/llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/cheri/atomic-rmw.ll @@ -11531,34 +11531,11 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64 addrspace(200)* %a, i64 %b) nounwin ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_monotonic: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: clw a3, 4(ca0) -; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: .LBB165_1: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: # Child Loop BB165_3 Depth 2 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 -; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 -; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 -; RV32IAXCHERI-NEXT: .LBB165_3: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # Parent Loop BB165_1 Depth=1 -; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 -; RV32IAXCHERI-NEXT: clr.c ca5, (ca0) -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: beqz a6, .LBB165_5 -; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB165_3 Depth=2 -; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) -; RV32IAXCHERI-NEXT: bnez a6, .LBB165_3 -; RV32IAXCHERI-NEXT: .LBB165_5: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB165_1 Depth=1 -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: mv a4, a5 -; RV32IAXCHERI-NEXT: cgethigh a3, ca5 -; RV32IAXCHERI-NEXT: beqz a6, .LBB165_1 -; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end -; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a3 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: camoswap.c ca1, ca1, (ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_monotonic: @@ -11592,34 +11569,11 @@ define i64 @atomicrmw_xchg_i64_acquire(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_acquire: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: clw a3, 4(ca0) -; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: .LBB166_1: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: # Child Loop BB166_3 Depth 2 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 -; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 -; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 -; RV32IAXCHERI-NEXT: .LBB166_3: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # Parent Loop BB166_1 Depth=1 -; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 -; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: beqz a6, .LBB166_5 -; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB166_3 Depth=2 -; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) -; RV32IAXCHERI-NEXT: bnez a6, .LBB166_3 -; RV32IAXCHERI-NEXT: .LBB166_5: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB166_1 Depth=1 -; RV32IAXCHERI-NEXT: cseqx a6, 
ca5, ca3 -; RV32IAXCHERI-NEXT: mv a4, a5 -; RV32IAXCHERI-NEXT: cgethigh a3, ca5 -; RV32IAXCHERI-NEXT: beqz a6, .LBB166_1 -; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end -; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a3 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: camoswap.c.aq ca1, ca1, (ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_acquire: @@ -11653,34 +11607,11 @@ define i64 @atomicrmw_xchg_i64_release(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_release: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: clw a3, 4(ca0) -; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: .LBB167_1: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: # Child Loop BB167_3 Depth 2 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 -; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 -; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 -; RV32IAXCHERI-NEXT: .LBB167_3: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # Parent Loop BB167_1 Depth=1 -; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 -; RV32IAXCHERI-NEXT: clr.c.rl ca5, (ca0) -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: beqz a6, .LBB167_5 -; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB167_3 Depth=2 -; RV32IAXCHERI-NEXT: csc.c a6, ca4, (ca0) -; RV32IAXCHERI-NEXT: bnez a6, .LBB167_3 -; RV32IAXCHERI-NEXT: .LBB167_5: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB167_1 Depth=1 -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: mv a4, a5 -; RV32IAXCHERI-NEXT: cgethigh a3, ca5 -; RV32IAXCHERI-NEXT: beqz a6, .LBB167_1 -; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end -; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a3 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: camoswap.c.rl ca1, ca1, (ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_release: @@ -11714,34 +11645,11 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_acq_rel: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: clw a3, 4(ca0) -; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: .LBB168_1: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: # Child Loop BB168_3 Depth 2 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 -; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 -; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 -; RV32IAXCHERI-NEXT: .LBB168_3: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # Parent Loop BB168_1 Depth=1 -; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 -; RV32IAXCHERI-NEXT: clr.c.aq ca5, (ca0) -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: beqz a6, .LBB168_5 -; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB168_3 Depth=2 -; RV32IAXCHERI-NEXT: csc.c.aq a6, ca4, (ca0) -; RV32IAXCHERI-NEXT: bnez a6, .LBB168_3 -; RV32IAXCHERI-NEXT: .LBB168_5: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB168_1 Depth=1 -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: mv a4, a5 -; RV32IAXCHERI-NEXT: cgethigh a3, 
ca5 -; RV32IAXCHERI-NEXT: beqz a6, .LBB168_1 -; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end -; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a3 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: camoswap.c.aqrl ca1, ca1, (ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_acq_rel: @@ -11775,34 +11683,11 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64 addrspace(200)* %a, i64 %b) nounwind ; ; RV32IAXCHERI-LABEL: atomicrmw_xchg_i64_seq_cst: ; RV32IAXCHERI: # %bb.0: -; RV32IAXCHERI-NEXT: clw a3, 4(ca0) -; RV32IAXCHERI-NEXT: clw a4, 0(ca0) -; RV32IAXCHERI-NEXT: .LBB169_1: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # =>This Loop Header: Depth=1 -; RV32IAXCHERI-NEXT: # Child Loop BB169_3 Depth 2 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a4 -; RV32IAXCHERI-NEXT: csethigh ca3, ca4, a3 -; RV32IAXCHERI-NEXT: cincoffset ca4, cnull, a1 -; RV32IAXCHERI-NEXT: csethigh ca4, ca4, a2 -; RV32IAXCHERI-NEXT: .LBB169_3: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # Parent Loop BB169_1 Depth=1 -; RV32IAXCHERI-NEXT: # => This Inner Loop Header: Depth=2 -; RV32IAXCHERI-NEXT: clr.c.aqrl ca5, (ca0) -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: beqz a6, .LBB169_5 -; RV32IAXCHERI-NEXT: # %bb.4: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB169_3 Depth=2 -; RV32IAXCHERI-NEXT: csc.c.aqrl a6, ca4, (ca0) -; RV32IAXCHERI-NEXT: bnez a6, .LBB169_3 -; RV32IAXCHERI-NEXT: .LBB169_5: # %atomicrmw.start -; RV32IAXCHERI-NEXT: # in Loop: Header=BB169_1 Depth=1 -; RV32IAXCHERI-NEXT: cseqx a6, ca5, ca3 -; RV32IAXCHERI-NEXT: mv a4, a5 -; RV32IAXCHERI-NEXT: cgethigh a3, ca5 -; RV32IAXCHERI-NEXT: beqz a6, .LBB169_1 -; RV32IAXCHERI-NEXT: # %bb.2: # %atomicrmw.end -; RV32IAXCHERI-NEXT: mv a0, a4 -; RV32IAXCHERI-NEXT: mv a1, a3 +; RV32IAXCHERI-NEXT: cincoffset ca1, cnull, a1 +; RV32IAXCHERI-NEXT: csethigh ca1, ca1, a2 +; RV32IAXCHERI-NEXT: camoswap.c.aqrl ca1, ca1, (ca0) +; RV32IAXCHERI-NEXT: mv a0, a1 +; RV32IAXCHERI-NEXT: cgethigh a1, ca1 ; RV32IAXCHERI-NEXT: cret ; ; RV64IXCHERI-LABEL: atomicrmw_xchg_i64_seq_cst: From 25303f9633ddd4639ac5327f79dabb60b6f57174 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Thu, 21 Sep 2023 12:39:31 -0700 Subject: [PATCH 12/18] [CHERI-RISC-V] Support atomic load/store with capability pointers We were just missing the necessary tablegen patterns to support this. 
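As a small example of what now works: a sequentially consistent load
through a capability pointer in a hybrid-mode (non-capmode) function
selects directly to the explicitly capability-based load instruction,
with the usual fences inserted around it. The IR below is taken from
the new atomic-load-store-cap-ptr.ll input; the expected RV32 hybrid
output with -mattr=+a is shown as comments:

  define i32 @load_32(ptr addrspace(200) %ptr) nounwind {
    %val = load atomic i32, ptr addrspace(200) %ptr seq_cst, align 4
    ret i32 %val
  }
  ; RV32 hybrid, -mattr=+a:
  ;   fence rw, rw       # leading fence (emitLeadingFence)
  ;   lw.cap a0, (ca0)   # capability-based load via the new pattern
  ;   fence r, rw        # trailing fence (emitTrailingFence)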
--- llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td | 27 ++ .../Inputs/atomic-load-store-cap-ptr.ll | 60 +++ .../MIPS/atomic-load-store-cap-ptr.ll | 203 +++++++++ .../RISCV32/atomic-load-store-cap-ptr.ll | 427 ++++++++++++++++++ .../RISCV64/atomic-load-store-cap-ptr.ll | 427 ++++++++++++++++++ 5 files changed, 1144 insertions(+) create mode 100644 llvm/test/CodeGen/CHERI-Generic/Inputs/atomic-load-store-cap-ptr.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-load-store-cap-ptr.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-load-store-cap-ptr.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-load-store-cap-ptr.ll diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td index 63ee0b4f6b09..9f3948ac98e9 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td @@ -1410,6 +1410,10 @@ multiclass CheriExplicitStPat; } +multiclass CheriExplicitAtomicStPat + : CheriExplicitStPat, Inst, StTy, AddrTy>; + /// DDC-relative loads let Predicates = [HasCheri, IsCapMode] in { @@ -1994,6 +1998,29 @@ defm : CheriAtomicStPat; defm : CheriAMOCapPat<"128", "atomic_swap_cap", "CAMOSWAP_C">; } // Predicates = [HasCheri, HasStdExtA, IsRV64, IsCapMode] +// Hybrid-mode atomic capability-based loads and stores +// As above, fences will be inserted for atomic load/stores according to the +// logic in RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}. +let Predicates = [HasCheri, HasStdExtA, NotCapMode] in { +defm : CheriExplicitLdPat; +defm : CheriExplicitLdPat; +defm : CheriExplicitLdPat; +defm : CheriExplicitAtomicStPat; +defm : CheriExplicitAtomicStPat; +defm : CheriExplicitAtomicStPat; +} // Predicates = [HasCheri, HasStdExtA, NotCapMode] +let Predicates = [HasCheri, HasStdExtA, IsRV32, NotCapMode] in { +defm : CheriExplicitLdPat; +defm : CheriExplicitAtomicStPat; +} // Predicates = [HasCheri, HasStdExtA, IsRV32, NotCapMode] +let Predicates = [HasCheri, HasStdExtA, IsRV64, NotCapMode] in { +defm : CheriExplicitLdPat; +defm : CheriExplicitAtomicStPat; +defm : CheriExplicitLdPat; +defm : CheriExplicitAtomicStPat; +} // Predicates = [HasCheri, HasStdExtA, IsRV64, NotCapMode] + + /// 'F' (Single-Precision Floating-Point) extension let Predicates = [HasCheri, HasStdExtF, IsCapMode] in { diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/atomic-load-store-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/atomic-load-store-cap-ptr.ll new file mode 100644 index 000000000000..693214f7c2b2 --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/atomic-load-store-cap-ptr.ll @@ -0,0 +1,60 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; Check that we can generate sensible code for atomic operations using capability pointers +; https://github.com/CTSRD-CHERI/llvm-project/issues/470 +@IF-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ -mattr=+a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-ATOMICS --allow-unused-prefixes +@IF-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ -mattr=-a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-LIBCALLS --allow-unused-prefixes +@IFNOT-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ %s -o - | FileCheck %s --check-prefix=PURECAP +@IF-RISCV@; RUN: llc @HYBRID_HARDFLOAT_ARGS@ -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-ATOMICS --allow-unused-prefixes +@IF-RISCV@; RUN: llc @HYBRID_HARDFLOAT_ARGS@ -mattr=-a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS 
--allow-unused-prefixes +@IFNOT-RISCV@; RUN: llc @HYBRID_HARDFLOAT_ARGS@ %s -o - | FileCheck %s --check-prefix=HYBRID + + +define i8 @load_8(ptr addrspace(200) %ptr) nounwind { + %val = load atomic i8, ptr addrspace(200) %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @load_16(ptr addrspace(200) %ptr) nounwind { + %val = load atomic i16, ptr addrspace(200) %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @load_32(ptr addrspace(200) %ptr) nounwind { + %val = load atomic i32, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define iCAPRANGE @load_range(ptr addrspace(200) %ptr) nounwind { + %val = load atomic iCAPRANGE, ptr addrspace(200) %ptr seq_cst, align @CAP_RANGE_BYTES@ + ret iCAPRANGE %val +} + +define ptr addrspace(200) @load_cap(ptr addrspace(200) %ptr) nounwind { + %val = load atomic ptr addrspace(200), ptr addrspace(200) %ptr seq_cst, align @CAP_BYTES@ + ret ptr addrspace(200) %val +} + +define i8 @store_8(ptr addrspace(200) %ptr, i8 %val) nounwind { + store atomic i8 %val, ptr addrspace(200) %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @store_16(ptr addrspace(200) %ptr, i16 %val) nounwind { + store atomic i16 %val, ptr addrspace(200) %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @store_32(ptr addrspace(200) %ptr, i32 %val) nounwind { + store atomic i32 %val, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define iCAPRANGE @store_range(ptr addrspace(200) %ptr, iCAPRANGE %val) nounwind { + store atomic iCAPRANGE %val, ptr addrspace(200) %ptr seq_cst, align @CAP_RANGE_BYTES@ + ret iCAPRANGE %val +} + +define ptr addrspace(200) @store_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %val) nounwind { + store atomic ptr addrspace(200) %val, ptr addrspace(200) %ptr seq_cst, align @CAP_BYTES@ + ret ptr addrspace(200) %val +} diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-load-store-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-load-store-cap-ptr.ll new file mode 100644 index 000000000000..56845280774c --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/atomic-load-store-cap-ptr.ll @@ -0,0 +1,203 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-load-store-cap-ptr.ll +; Check that we can generate sensible code for atomic operations using capability pointers +; https://github.com/CTSRD-CHERI/llvm-project/issues/470 +; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap %s -o - | FileCheck %s --check-prefix=PURECAP +; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi n64 %s -o - | FileCheck %s --check-prefix=HYBRID + + +define i8 @load_8(ptr addrspace(200) %ptr) nounwind { +; PURECAP-LABEL: load_8: +; PURECAP: # %bb.0: +; PURECAP-NEXT: clb $2, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: load_8: +; HYBRID: # %bb.0: +; HYBRID-NEXT: clb $2, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %val = load atomic i8, ptr addrspace(200) %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @load_16(ptr addrspace(200) %ptr) nounwind { +; PURECAP-LABEL: load_16: +; PURECAP: # %bb.0: +; PURECAP-NEXT: clh $2, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: load_16: +; HYBRID: # %bb.0: +; HYBRID-NEXT: clh $2, $zero, 0($c3) 
+; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %val = load atomic i16, ptr addrspace(200) %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @load_32(ptr addrspace(200) %ptr) nounwind { +; PURECAP-LABEL: load_32: +; PURECAP: # %bb.0: +; PURECAP-NEXT: clw $2, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: load_32: +; HYBRID: # %bb.0: +; HYBRID-NEXT: clw $2, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %val = load atomic i32, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define i64 @load_range(ptr addrspace(200) %ptr) nounwind { +; PURECAP-LABEL: load_range: +; PURECAP: # %bb.0: +; PURECAP-NEXT: cld $2, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: load_range: +; HYBRID: # %bb.0: +; HYBRID-NEXT: cld $2, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %val = load atomic i64, ptr addrspace(200) %ptr seq_cst, align 8 + ret i64 %val +} + +define ptr addrspace(200) @load_cap(ptr addrspace(200) %ptr) nounwind { +; PURECAP-LABEL: load_cap: +; PURECAP: # %bb.0: +; PURECAP-NEXT: clc $c3, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: load_cap: +; HYBRID: # %bb.0: +; HYBRID-NEXT: clc $c3, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + %val = load atomic ptr addrspace(200), ptr addrspace(200) %ptr seq_cst, align 16 + ret ptr addrspace(200) %val +} + +define i8 @store_8(ptr addrspace(200) %ptr, i8 %val) nounwind { +; PURECAP-LABEL: store_8: +; PURECAP: # %bb.0: +; PURECAP-NEXT: sync +; PURECAP-NEXT: sll $2, $4, 0 +; PURECAP-NEXT: csb $2, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: store_8: +; HYBRID: # %bb.0: +; HYBRID-NEXT: sync +; HYBRID-NEXT: sll $2, $4, 0 +; HYBRID-NEXT: csb $2, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + store atomic i8 %val, ptr addrspace(200) %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @store_16(ptr addrspace(200) %ptr, i16 %val) nounwind { +; PURECAP-LABEL: store_16: +; PURECAP: # %bb.0: +; PURECAP-NEXT: sync +; PURECAP-NEXT: sll $2, $4, 0 +; PURECAP-NEXT: csh $2, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: store_16: +; HYBRID: # %bb.0: +; HYBRID-NEXT: sync +; HYBRID-NEXT: sll $2, $4, 0 +; HYBRID-NEXT: csh $2, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + store atomic i16 %val, ptr addrspace(200) %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @store_32(ptr addrspace(200) %ptr, i32 %val) nounwind { +; PURECAP-LABEL: store_32: +; PURECAP: # %bb.0: +; PURECAP-NEXT: sll $2, $4, 0 +; PURECAP-NEXT: sync +; PURECAP-NEXT: csw $2, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: nop +; +; HYBRID-LABEL: store_32: +; HYBRID: # %bb.0: +; HYBRID-NEXT: sll $2, $4, 0 +; HYBRID-NEXT: sync +; HYBRID-NEXT: csw $2, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: nop + store atomic i32 %val, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define i64 @store_range(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-LABEL: store_range: +; PURECAP: # %bb.0: +; PURECAP-NEXT: sync +; PURECAP-NEXT: csd $4, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: move $2, $4 +; +; HYBRID-LABEL: store_range: +; 
HYBRID: # %bb.0: +; HYBRID-NEXT: sync +; HYBRID-NEXT: csd $4, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: move $2, $4 + store atomic i64 %val, ptr addrspace(200) %ptr seq_cst, align 8 + ret i64 %val +} + +define ptr addrspace(200) @store_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %val) nounwind { +; PURECAP-LABEL: store_cap: +; PURECAP: # %bb.0: +; PURECAP-NEXT: sync +; PURECAP-NEXT: csc $c4, $zero, 0($c3) +; PURECAP-NEXT: sync +; PURECAP-NEXT: cjr $c17 +; PURECAP-NEXT: cmove $c3, $c4 +; +; HYBRID-LABEL: store_cap: +; HYBRID: # %bb.0: +; HYBRID-NEXT: sync +; HYBRID-NEXT: csc $c4, $zero, 0($c3) +; HYBRID-NEXT: sync +; HYBRID-NEXT: jr $ra +; HYBRID-NEXT: cmove $c3, $c4 + store atomic ptr addrspace(200) %val, ptr addrspace(200) %ptr seq_cst, align 16 + ret ptr addrspace(200) %val +} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-load-store-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-load-store-cap-ptr.ll new file mode 100644 index 000000000000..d4689bb22c9c --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-load-store-cap-ptr.ll @@ -0,0 +1,427 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-load-store-cap-ptr.ll +; Check that we can generate sensible code for atomic operations using capability pointers +; https://github.com/CTSRD-CHERI/llvm-project/issues/470 +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=+a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-ATOMICS --allow-unused-prefixes +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=-a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-LIBCALLS --allow-unused-prefixes +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-ATOMICS --allow-unused-prefixes +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi ilp32f -mattr=+xcheri,+f -mattr=-a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS --allow-unused-prefixes + + +define i8 @load_8(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_8: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clb a0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_8: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_1 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_8: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lb.cap a0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_8: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_1_c@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; 
HYBRID-LIBCALLS-NEXT: ret + %val = load atomic i8, ptr addrspace(200) %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @load_16(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_16: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clh a0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_16: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_2 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_16: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lh.cap a0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_16: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_2_c@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + %val = load atomic i16, ptr addrspace(200) %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @load_32(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_32: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clw a0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_32: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_4 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_32: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lw.cap a0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_32: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_4_c@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + %val = load atomic i32, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define i32 @load_range(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_range: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clw a0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_range: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_4 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: 
load_range: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lw.cap a0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_range: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_4_c@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + %val = load atomic i32, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define ptr addrspace(200) @load_cap(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_cap: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clc ca0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_cap: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_cap +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_cap: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lc.cap ca0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_cap: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_cap_c@plt +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + %val = load atomic ptr addrspace(200), ptr addrspace(200) %ptr seq_cst, align 8 + ret ptr addrspace(200) %val +} + +define i8 @store_8(ptr addrspace(200) %ptr, i8 %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_8: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csb a1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: store_8: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_1 +; PURECAP-LIBCALLS-NEXT: mv a0, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_8: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sb.cap a1, (ca0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_8: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a1 +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_1_c@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s0 
+; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic i8 %val, ptr addrspace(200) %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @store_16(ptr addrspace(200) %ptr, i16 %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_16: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csh a1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: store_16: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_2 +; PURECAP-LIBCALLS-NEXT: mv a0, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_16: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sh.cap a1, (ca0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_16: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a1 +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_2_c@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic i16 %val, ptr addrspace(200) %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @store_32(ptr addrspace(200) %ptr, i32 %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_32: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csw a1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: store_32: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_4 +; PURECAP-LIBCALLS-NEXT: mv a0, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_32: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sw.cap a1, (ca0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_32: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a1 +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_4_c@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: lw 
ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic i32 %val, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define i32 @store_range(ptr addrspace(200) %ptr, i32 %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_range: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csw a1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: store_range: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_4 +; PURECAP-LIBCALLS-NEXT: mv a0, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_range: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sw.cap a1, (ca0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_range: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a1 +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_4_c@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic i32 %val, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define ptr addrspace(200) @store_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_cap: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csc ca1, 0(ca0) +; PURECAP-ATOMICS-NEXT: cmove ca0, ca1 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: store_cap: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 8(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 8-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs0, ca1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_cap +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs0 +; PURECAP-LIBCALLS-NEXT: clc cra, 8(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 8-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_cap: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sc.cap ca1, (ca0) +; HYBRID-ATOMICS-NEXT: cmove ca0, ca1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_cap: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sc ca1, 0(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_cap_c@plt +; HYBRID-LIBCALLS-NEXT: lc ca0, 0(sp) # 8-byte 
Folded Reload +; HYBRID-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic ptr addrspace(200) %val, ptr addrspace(200) %ptr seq_cst, align 8 + ret ptr addrspace(200) %val +} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-load-store-cap-ptr.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-load-store-cap-ptr.ll new file mode 100644 index 000000000000..c580bf97d8f5 --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-load-store-cap-ptr.ll @@ -0,0 +1,427 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/atomic-load-store-cap-ptr.ll +; Check that we can generate sensible code for atomic operations using capability pointers +; https://github.com/CTSRD-CHERI/llvm-project/issues/470 +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=+a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-ATOMICS --allow-unused-prefixes +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=-a < %s | FileCheck %s --check-prefixes=PURECAP,PURECAP-LIBCALLS --allow-unused-prefixes +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=+a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-ATOMICS --allow-unused-prefixes +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi lp64d -mattr=+xcheri,+f,+d -mattr=-a < %s | FileCheck %s --check-prefixes=HYBRID,HYBRID-LIBCALLS --allow-unused-prefixes + + +define i8 @load_8(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_8: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clb a0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_8: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_1 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_8: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lb.cap a0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_8: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_1_c@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + %val = load atomic i8, ptr addrspace(200) %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @load_16(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_16: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clh a0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_16: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; 
PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_2 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_16: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lh.cap a0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_16: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_2_c@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + %val = load atomic i16, ptr addrspace(200) %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @load_32(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_32: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clw a0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_32: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_4 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_32: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lw.cap a0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_32: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_4_c@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + %val = load atomic i32, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define i64 @load_range(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_range: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: cld a0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_range: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_8 +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_range: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: ld.cap a0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_range: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_8_c@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: 
ret + %val = load atomic i64, ptr addrspace(200) %ptr seq_cst, align 8 + ret i64 %val +} + +define ptr addrspace(200) @load_cap(ptr addrspace(200) %ptr) nounwind { +; PURECAP-ATOMICS-LABEL: load_cap: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, rw +; PURECAP-ATOMICS-NEXT: clc ca0, 0(ca0) +; PURECAP-ATOMICS-NEXT: fence r, rw +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: load_cap: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -16 +; PURECAP-LIBCALLS-NEXT: csc cra, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: li a1, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_load_cap +; PURECAP-LIBCALLS-NEXT: clc cra, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 16 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: load_cap: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, rw +; HYBRID-ATOMICS-NEXT: lc.cap ca0, (ca0) +; HYBRID-ATOMICS-NEXT: fence r, rw +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: load_cap: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a1, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_load_cap_c@plt +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + %val = load atomic ptr addrspace(200), ptr addrspace(200) %ptr seq_cst, align 16 + ret ptr addrspace(200) %val +} + +define i8 @store_8(ptr addrspace(200) %ptr, i8 %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_8: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csb a1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: store_8: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_1 +; PURECAP-LIBCALLS-NEXT: mv a0, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_8: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sb.cap a1, (ca0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_8: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a1 +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_1_c@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic i8 %val, ptr addrspace(200) %ptr seq_cst, align 1 + ret i8 %val +} + +define i16 @store_16(ptr addrspace(200) %ptr, i16 %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_16: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csh a1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cret +; +; 
PURECAP-LIBCALLS-LABEL: store_16: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_2 +; PURECAP-LIBCALLS-NEXT: mv a0, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_16: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sh.cap a1, (ca0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_16: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a1 +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_2_c@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic i16 %val, ptr addrspace(200) %ptr seq_cst, align 2 + ret i16 %val +} + +define i32 @store_32(ptr addrspace(200) %ptr, i32 %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_32: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csw a1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: store_32: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_4 +; PURECAP-LIBCALLS-NEXT: mv a0, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_32: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sw.cap a1, (ca0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_32: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a1 +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_4_c@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic i32 %val, ptr addrspace(200) %ptr seq_cst, align 4 + ret i32 %val +} + +define i64 @store_range(ptr addrspace(200) %ptr, i64 %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_range: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csd a1, 0(ca0) +; PURECAP-ATOMICS-NEXT: mv a0, a1 +; PURECAP-ATOMICS-NEXT: cret +; +; 
PURECAP-LIBCALLS-LABEL: store_range: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: mv s0, a1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_8 +; PURECAP-LIBCALLS-NEXT: mv a0, s0 +; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_range: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sd.cap a1, (ca0) +; HYBRID-ATOMICS-NEXT: mv a0, a1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_range: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: mv s0, a1 +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_8_c@plt +; HYBRID-LIBCALLS-NEXT: mv a0, s0 +; HYBRID-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-LIBCALLS-NEXT: ret + store atomic i64 %val, ptr addrspace(200) %ptr seq_cst, align 8 + ret i64 %val +} + +define ptr addrspace(200) @store_cap(ptr addrspace(200) %ptr, ptr addrspace(200) %val) nounwind { +; PURECAP-ATOMICS-LABEL: store_cap: +; PURECAP-ATOMICS: # %bb.0: +; PURECAP-ATOMICS-NEXT: fence rw, w +; PURECAP-ATOMICS-NEXT: csc ca1, 0(ca0) +; PURECAP-ATOMICS-NEXT: cmove ca0, ca1 +; PURECAP-ATOMICS-NEXT: cret +; +; PURECAP-LIBCALLS-LABEL: store_cap: +; PURECAP-LIBCALLS: # %bb.0: +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, -32 +; PURECAP-LIBCALLS-NEXT: csc cra, 16(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: csc cs0, 0(csp) # 16-byte Folded Spill +; PURECAP-LIBCALLS-NEXT: cmove cs0, ca1 +; PURECAP-LIBCALLS-NEXT: li a2, 5 +; PURECAP-LIBCALLS-NEXT: ccall __atomic_store_cap +; PURECAP-LIBCALLS-NEXT: cmove ca0, cs0 +; PURECAP-LIBCALLS-NEXT: clc cra, 16(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: clc cs0, 0(csp) # 16-byte Folded Reload +; PURECAP-LIBCALLS-NEXT: cincoffset csp, csp, 32 +; PURECAP-LIBCALLS-NEXT: cret +; +; HYBRID-ATOMICS-LABEL: store_cap: +; HYBRID-ATOMICS: # %bb.0: +; HYBRID-ATOMICS-NEXT: fence rw, w +; HYBRID-ATOMICS-NEXT: sc.cap ca1, (ca0) +; HYBRID-ATOMICS-NEXT: cmove ca0, ca1 +; HYBRID-ATOMICS-NEXT: ret +; +; HYBRID-LIBCALLS-LABEL: store_cap: +; HYBRID-LIBCALLS: # %bb.0: +; HYBRID-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-LIBCALLS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: sc ca1, 0(sp) # 16-byte Folded Spill +; HYBRID-LIBCALLS-NEXT: li a2, 5 +; HYBRID-LIBCALLS-NEXT: call __atomic_store_cap_c@plt +; HYBRID-LIBCALLS-NEXT: lc ca0, 0(sp) # 16-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-LIBCALLS-NEXT: ret + store atomic ptr addrspace(200) %val, ptr addrspace(200) %ptr seq_cst, align 16 + ret ptr addrspace(200) %val +} From f4bd4408f18135a00d865585c3ae81503e6fef18 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Thu, 21 Sep 2023 12:53:48 -0700 Subject: [PATCH 13/18] [CHERI-RISC-V] Support hybrid mode atomic load/store of 2*XLen integers With the latest commit we can lower 
i64/i128 loads/stores as atomic capability loads/stores with the appropriate fences.
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 3 +-
 .../RISCV32/atomic-cap-size-int.ll | 90 +++++++++++++------
 .../RISCV64/atomic-cap-size-int.ll | 90 +++++++++++++------
 3 files changed, 123 insertions(+), 60 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 27216ada7cce..a3e352405d4c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12919,8 +12919,7 @@ bool RISCVTargetLowering::supportsAtomicOperation(const DataLayout &DL,
   // Using capability pointers in hybrid mode is not yet supported for this
   // as we are missing some required patterns.
   if (Subtarget.hasStdExtA() && Subtarget.hasCheri() &&
-      ValueTy->isIntegerTy(Subtarget.typeForCapabilities().getSizeInBits()) &&
-      DL.isFatPointer(PointerTy) == IsPureCapABI)
+      ValueTy->isIntegerTy(Subtarget.typeForCapabilities().getSizeInBits()))
     return true;
   return TargetLowering::supportsAtomicOperation(DL, AI, ValueTy, PointerTy,
                                                  Alignment);
diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
index 653bec3808d4..75c14f857ce3 100644
--- a/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
+++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/atomic-cap-size-int.ll
@@ -69,23 +69,33 @@ define i64 @store(ptr addrspace(200) %ptr, i64 %val) nounwind {
 ; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16
 ; HYBRID-LIBCALLS-NEXT: ret
 ;
-; HYBRID-CAP-PTR-LABEL: store:
-; HYBRID-CAP-PTR: # %bb.0:
-; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16
-; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; HYBRID-CAP-PTR-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; HYBRID-CAP-PTR-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; HYBRID-CAP-PTR-NEXT: mv s0, a2
-; HYBRID-CAP-PTR-NEXT: mv s1, a1
-; HYBRID-CAP-PTR-NEXT: li a3, 5
-; HYBRID-CAP-PTR-NEXT: call __atomic_store_8_c@plt
-; HYBRID-CAP-PTR-NEXT: mv a0, s1
-; HYBRID-CAP-PTR-NEXT: mv a1, s0
-; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; HYBRID-CAP-PTR-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
-; HYBRID-CAP-PTR-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
-; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16
-; HYBRID-CAP-PTR-NEXT: ret
+; HYBRID-CAP-PTR-ATOMICS-LABEL: store:
+; HYBRID-CAP-PTR-ATOMICS: # %bb.0:
+; HYBRID-CAP-PTR-ATOMICS-NEXT: fence rw, w
+; HYBRID-CAP-PTR-ATOMICS-NEXT: cincoffset ca3, cnull, a1
+; HYBRID-CAP-PTR-ATOMICS-NEXT: csethigh ca3, ca3, a2
+; HYBRID-CAP-PTR-ATOMICS-NEXT: sc.cap ca3, (ca0)
+; HYBRID-CAP-PTR-ATOMICS-NEXT: mv a0, a1
+; HYBRID-CAP-PTR-ATOMICS-NEXT: mv a1, a2
+; HYBRID-CAP-PTR-ATOMICS-NEXT: ret
+;
+; HYBRID-CAP-PTR-LIBCALLS-LABEL: store:
+; HYBRID-CAP-PTR-LIBCALLS: # %bb.0:
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: addi sp, sp, -16
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: mv s0, a2
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: mv s1, a1
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: li a3, 5
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: call __atomic_store_8_c@plt
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: mv a0, s1
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: mv a1, s0
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; HYBRID-CAP-PTR-LIBCALLS-NEXT: lw s1, 4(sp) # 4-byte Folded
Reload +; HYBRID-CAP-PTR-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@store ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { ; PURECAP-IR-NEXT: fence release @@ -98,7 +108,12 @@ define i64 @store(ptr addrspace(200) %ptr, i64 %val) nounwind { ; ; HYBRID-IR-LABEL: define {{[^@]+}}@store ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i64 [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { -; HYBRID-IR-NEXT: call void @__atomic_store_8_c(ptr addrspace(200) [[PTR]], i64 [[VAL]], i32 5) +; HYBRID-IR-NEXT: fence release +; HYBRID-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i64 [[VAL]] +; HYBRID-IR-NEXT: [[TMP2:%.*]] = lshr i64 [[VAL]], 32 +; HYBRID-IR-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i32(ptr addrspace(200) [[TMP1]], i32 [[TMP3]]) +; HYBRID-IR-NEXT: store atomic ptr addrspace(200) [[TMP4]], ptr addrspace(200) [[PTR]] monotonic, align 8 ; HYBRID-IR-NEXT: ret i64 [[VAL]] ; store atomic i64 %val, ptr addrspace(200) %ptr seq_cst, align 8 @@ -144,15 +159,24 @@ define i64 @load(ptr addrspace(200) %ptr) nounwind { ; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 ; HYBRID-LIBCALLS-NEXT: ret ; -; HYBRID-CAP-PTR-LABEL: load: -; HYBRID-CAP-PTR: # %bb.0: -; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 -; HYBRID-CAP-PTR-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; HYBRID-CAP-PTR-NEXT: li a1, 5 -; HYBRID-CAP-PTR-NEXT: call __atomic_load_8_c@plt -; HYBRID-CAP-PTR-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 -; HYBRID-CAP-PTR-NEXT: ret +; HYBRID-CAP-PTR-ATOMICS-LABEL: load: +; HYBRID-CAP-PTR-ATOMICS: # %bb.0: +; HYBRID-CAP-PTR-ATOMICS-NEXT: fence rw, rw +; HYBRID-CAP-PTR-ATOMICS-NEXT: lc.cap ca1, (ca0) +; HYBRID-CAP-PTR-ATOMICS-NEXT: mv a0, a1 +; HYBRID-CAP-PTR-ATOMICS-NEXT: cgethigh a1, ca1 +; HYBRID-CAP-PTR-ATOMICS-NEXT: fence r, rw +; HYBRID-CAP-PTR-ATOMICS-NEXT: ret +; +; HYBRID-CAP-PTR-LIBCALLS-LABEL: load: +; HYBRID-CAP-PTR-LIBCALLS: # %bb.0: +; HYBRID-CAP-PTR-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; HYBRID-CAP-PTR-LIBCALLS-NEXT: li a1, 5 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: call __atomic_load_8_c@plt +; HYBRID-CAP-PTR-LIBCALLS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; HYBRID-CAP-PTR-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@load ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) addrspace(200) #[[ATTR0]] { ; PURECAP-IR-NEXT: fence seq_cst @@ -168,8 +192,16 @@ define i64 @load(ptr addrspace(200) %ptr) nounwind { ; ; HYBRID-IR-LABEL: define {{[^@]+}}@load ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) #[[ATTR0]] { -; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i64 @__atomic_load_8_c(ptr addrspace(200) [[PTR]], i32 5) -; HYBRID-IR-NEXT: ret i64 [[TMP1]] +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP1:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[PTR]] monotonic, align 8 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = call i32 @llvm.cheri.cap.address.get.i32(ptr addrspace(200) [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP3:%.*]] = call i32 @llvm.cheri.cap.high.get.i32(ptr addrspace(200) [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP4:%.*]] = zext i32 [[TMP2]] to i64 +; HYBRID-IR-NEXT: [[TMP5:%.*]] = zext i32 [[TMP3]] to i64 +; HYBRID-IR-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 32 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = or i64 [[TMP4]], [[TMP6]] +; HYBRID-IR-NEXT: fence 
acquire +; HYBRID-IR-NEXT: ret i64 [[TMP7]] ; %val = load atomic i64, ptr addrspace(200) %ptr seq_cst, align 8 ret i64 %val diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll index 28b8e6faeec5..c0d5f8e1ab12 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/atomic-cap-size-int.ll @@ -69,23 +69,33 @@ define i128 @store(ptr addrspace(200) %ptr, i128 %val) nounwind { ; HYBRID-LIBCALLS-NEXT: addi sp, sp, 32 ; HYBRID-LIBCALLS-NEXT: ret ; -; HYBRID-CAP-PTR-LABEL: store: -; HYBRID-CAP-PTR: # %bb.0: -; HYBRID-CAP-PTR-NEXT: addi sp, sp, -32 -; HYBRID-CAP-PTR-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; HYBRID-CAP-PTR-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; HYBRID-CAP-PTR-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; HYBRID-CAP-PTR-NEXT: mv s0, a2 -; HYBRID-CAP-PTR-NEXT: mv s1, a1 -; HYBRID-CAP-PTR-NEXT: li a3, 5 -; HYBRID-CAP-PTR-NEXT: call __atomic_store_16_c@plt -; HYBRID-CAP-PTR-NEXT: mv a0, s1 -; HYBRID-CAP-PTR-NEXT: mv a1, s0 -; HYBRID-CAP-PTR-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; HYBRID-CAP-PTR-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; HYBRID-CAP-PTR-NEXT: ld s1, 8(sp) # 8-byte Folded Reload -; HYBRID-CAP-PTR-NEXT: addi sp, sp, 32 -; HYBRID-CAP-PTR-NEXT: ret +; HYBRID-CAP-PTR-ATOMICS-LABEL: store: +; HYBRID-CAP-PTR-ATOMICS: # %bb.0: +; HYBRID-CAP-PTR-ATOMICS-NEXT: fence rw, w +; HYBRID-CAP-PTR-ATOMICS-NEXT: cincoffset ca3, cnull, a1 +; HYBRID-CAP-PTR-ATOMICS-NEXT: csethigh ca3, ca3, a2 +; HYBRID-CAP-PTR-ATOMICS-NEXT: sc.cap ca3, (ca0) +; HYBRID-CAP-PTR-ATOMICS-NEXT: mv a0, a1 +; HYBRID-CAP-PTR-ATOMICS-NEXT: mv a1, a2 +; HYBRID-CAP-PTR-ATOMICS-NEXT: ret +; +; HYBRID-CAP-PTR-LIBCALLS-LABEL: store: +; HYBRID-CAP-PTR-LIBCALLS: # %bb.0: +; HYBRID-CAP-PTR-LIBCALLS-NEXT: addi sp, sp, -32 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-LIBCALLS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-LIBCALLS-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-LIBCALLS-NEXT: mv s0, a2 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: mv s1, a1 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: li a3, 5 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: call __atomic_store_16_c@plt +; HYBRID-CAP-PTR-LIBCALLS-NEXT: mv a0, s1 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: mv a1, s0 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-LIBCALLS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-LIBCALLS-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-LIBCALLS-NEXT: addi sp, sp, 32 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@store ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { ; PURECAP-IR-NEXT: fence release @@ -98,7 +108,12 @@ define i128 @store(ptr addrspace(200) %ptr, i128 %val) nounwind { ; ; HYBRID-IR-LABEL: define {{[^@]+}}@store ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]], i128 [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { -; HYBRID-IR-NEXT: call void @__atomic_store_16_c(ptr addrspace(200) [[PTR]], i128 [[VAL]], i32 5) +; HYBRID-IR-NEXT: fence release +; HYBRID-IR-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr addrspace(200) null, i128 [[VAL]] +; HYBRID-IR-NEXT: [[TMP2:%.*]] = lshr i128 [[VAL]], 64 +; HYBRID-IR-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i64 +; HYBRID-IR-NEXT: [[TMP4:%.*]] = call ptr addrspace(200) @llvm.cheri.cap.high.set.i64(ptr addrspace(200) [[TMP1]], i64 [[TMP3]]) +; HYBRID-IR-NEXT: store 
atomic ptr addrspace(200) [[TMP4]], ptr addrspace(200) [[PTR]] monotonic, align 16 ; HYBRID-IR-NEXT: ret i128 [[VAL]] ; store atomic i128 %val, ptr addrspace(200) %ptr seq_cst, align 16 @@ -144,15 +159,24 @@ define i128 @load(ptr addrspace(200) %ptr) nounwind { ; HYBRID-LIBCALLS-NEXT: addi sp, sp, 16 ; HYBRID-LIBCALLS-NEXT: ret ; -; HYBRID-CAP-PTR-LABEL: load: -; HYBRID-CAP-PTR: # %bb.0: -; HYBRID-CAP-PTR-NEXT: addi sp, sp, -16 -; HYBRID-CAP-PTR-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; HYBRID-CAP-PTR-NEXT: li a1, 5 -; HYBRID-CAP-PTR-NEXT: call __atomic_load_16_c@plt -; HYBRID-CAP-PTR-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; HYBRID-CAP-PTR-NEXT: addi sp, sp, 16 -; HYBRID-CAP-PTR-NEXT: ret +; HYBRID-CAP-PTR-ATOMICS-LABEL: load: +; HYBRID-CAP-PTR-ATOMICS: # %bb.0: +; HYBRID-CAP-PTR-ATOMICS-NEXT: fence rw, rw +; HYBRID-CAP-PTR-ATOMICS-NEXT: lc.cap ca1, (ca0) +; HYBRID-CAP-PTR-ATOMICS-NEXT: mv a0, a1 +; HYBRID-CAP-PTR-ATOMICS-NEXT: cgethigh a1, ca1 +; HYBRID-CAP-PTR-ATOMICS-NEXT: fence r, rw +; HYBRID-CAP-PTR-ATOMICS-NEXT: ret +; +; HYBRID-CAP-PTR-LIBCALLS-LABEL: load: +; HYBRID-CAP-PTR-LIBCALLS: # %bb.0: +; HYBRID-CAP-PTR-LIBCALLS-NEXT: addi sp, sp, -16 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; HYBRID-CAP-PTR-LIBCALLS-NEXT: li a1, 5 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: call __atomic_load_16_c@plt +; HYBRID-CAP-PTR-LIBCALLS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; HYBRID-CAP-PTR-LIBCALLS-NEXT: addi sp, sp, 16 +; HYBRID-CAP-PTR-LIBCALLS-NEXT: ret ; PURECAP-IR-LABEL: define {{[^@]+}}@load ; PURECAP-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) addrspace(200) #[[ATTR0]] { ; PURECAP-IR-NEXT: fence seq_cst @@ -168,8 +192,16 @@ define i128 @load(ptr addrspace(200) %ptr) nounwind { ; ; HYBRID-IR-LABEL: define {{[^@]+}}@load ; HYBRID-IR-SAME: (ptr addrspace(200) [[PTR:%.*]]) #[[ATTR0]] { -; HYBRID-IR-NEXT: [[TMP1:%.*]] = call i128 @__atomic_load_16_c(ptr addrspace(200) [[PTR]], i32 5) -; HYBRID-IR-NEXT: ret i128 [[TMP1]] +; HYBRID-IR-NEXT: fence seq_cst +; HYBRID-IR-NEXT: [[TMP1:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[PTR]] monotonic, align 16 +; HYBRID-IR-NEXT: [[TMP2:%.*]] = call i64 @llvm.cheri.cap.address.get.i64(ptr addrspace(200) [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.cheri.cap.high.get.i64(ptr addrspace(200) [[TMP1]]) +; HYBRID-IR-NEXT: [[TMP4:%.*]] = zext i64 [[TMP2]] to i128 +; HYBRID-IR-NEXT: [[TMP5:%.*]] = zext i64 [[TMP3]] to i128 +; HYBRID-IR-NEXT: [[TMP6:%.*]] = shl i128 [[TMP5]], 64 +; HYBRID-IR-NEXT: [[TMP7:%.*]] = or i128 [[TMP4]], [[TMP6]] +; HYBRID-IR-NEXT: fence acquire +; HYBRID-IR-NEXT: ret i128 [[TMP7]] ; %val = load atomic i128, ptr addrspace(200) %ptr seq_cst, align 16 ret i128 %val From 87e505451beee628ad12c53fd703a9f276cb95ba Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Thu, 21 Sep 2023 16:30:49 -0700 Subject: [PATCH 14/18] [CHERI] Add tests for __atomic_always_lock_free(sizeof(uintptr_t)) We don't yet return true here for purecap CHERI since right now clang will emit libcalls for non-capability sizeof(uintptr_t) operations such as __int128 for 64-bit RISC-V. 
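
For reference, a minimal standalone illustration of the property these tests
pin down (hypothetical user code, not part of the patch; it assumes a purecap
CHERI-RISC-V target, where sizeof(uintptr_t) is the 2*XLen capability size):

  #include <stdint.h>

  /* Currently fails on purecap: the builtin only sees a size, and
     capability-size plain integers (e.g. __int128 on 64-bit RISC-V)
     still lower to __atomic_* libcalls, so it conservatively reports
     false for any 2*XLen-sized object. */
  _Static_assert(__atomic_always_lock_free(sizeof(uintptr_t), 0),
                 "capability-size atomics should be lock-free");

  /* XLen-and-smaller integers are unaffected and remain lock-free. */
  _Static_assert(__atomic_always_lock_free(sizeof(long), 0), "");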
--- clang/test/CodeGen/cheri/atomic-lock-free.c | 281 ++++++++++++++++++++ clang/test/Sema/cheri/atomic-lock-free.c | 25 ++ 2 files changed, 306 insertions(+) create mode 100644 clang/test/CodeGen/cheri/atomic-lock-free.c create mode 100644 clang/test/Sema/cheri/atomic-lock-free.c diff --git a/clang/test/CodeGen/cheri/atomic-lock-free.c b/clang/test/CodeGen/cheri/atomic-lock-free.c new file mode 100644 index 000000000000..c7dac7b32066 --- /dev/null +++ b/clang/test/CodeGen/cheri/atomic-lock-free.c @@ -0,0 +1,281 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature +/// Check that we emit inline atomics rather than library calls for capability-size atomics +// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP64 +// RUN: %riscv64_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID64 +// RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP32 +// RUN: %riscv32_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID32 + +#if __CHERI_CAPABILITY_WIDTH__ == 64 +typedef __INT64_TYPE__ cap_size_int; +#else +typedef __int128 cap_size_int; +#endif + +// PURECAP64-LABEL: define {{[^@]+}}@load_long +// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: [[TMP0:%.*]] = load atomic i64, ptr addrspace(200) [[L]] seq_cst, align 8 +// PURECAP64-NEXT: ret i64 [[TMP0]] +// +// HYBRID64-LABEL: define {{[^@]+}}@load_long +// HYBRID64-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0:[0-9]+]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: [[TMP0:%.*]] = load atomic i64, ptr [[L]] seq_cst, align 8 +// HYBRID64-NEXT: ret i64 [[TMP0]] +// +// PURECAP32-LABEL: define {{[^@]+}}@load_long +// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: [[TMP0:%.*]] = load atomic i32, ptr addrspace(200) [[L]] seq_cst, align 4 +// PURECAP32-NEXT: ret i32 [[TMP0]] +// +// HYBRID32-LABEL: define {{[^@]+}}@load_long +// HYBRID32-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0:[0-9]+]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: [[TMP0:%.*]] = load atomic i32, ptr [[L]] seq_cst, align 4 +// HYBRID32-NEXT: ret i32 [[TMP0]] +// +long load_long(long* l) { + return __atomic_load_n(l, __ATOMIC_SEQ_CST); +} + +// PURECAP64-LABEL: define {{[^@]+}}@load_cap +// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[I]] seq_cst, align 16 +// PURECAP64-NEXT: ret ptr addrspace(200) [[TMP0]] +// +// HYBRID64-LABEL: define {{[^@]+}}@load_cap +// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr [[I]] seq_cst, align 16 +// HYBRID64-NEXT: ret ptr addrspace(200) [[TMP0]] +// +// PURECAP32-LABEL: define {{[^@]+}}@load_cap +// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[I]] seq_cst, align 8 +// 
PURECAP32-NEXT: ret ptr addrspace(200) [[TMP0]] +// +// HYBRID32-LABEL: define {{[^@]+}}@load_cap +// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr [[I]] seq_cst, align 8 +// HYBRID32-NEXT: ret ptr addrspace(200) [[TMP0]] +// +__intcap load_cap(__intcap* i) { + return __atomic_load_n(i, __ATOMIC_SEQ_CST); +} + +// PURECAP64-LABEL: define {{[^@]+}}@loadi128 +// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16, addrspace(200) +// PURECAP64-NEXT: call void @__atomic_load(i64 noundef 16, ptr addrspace(200) noundef [[I]], ptr addrspace(200) noundef [[ATOMIC_TEMP]], i32 noundef signext 5) +// PURECAP64-NEXT: [[TMP0:%.*]] = load i128, ptr addrspace(200) [[ATOMIC_TEMP]], align 16 +// PURECAP64-NEXT: ret i128 [[TMP0]] +// +// HYBRID64-LABEL: define {{[^@]+}}@loadi128 +// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16 +// HYBRID64-NEXT: call void @__atomic_load(i64 noundef 16, ptr noundef [[I]], ptr noundef [[ATOMIC_TEMP]], i32 noundef signext 5) +// HYBRID64-NEXT: [[TMP0:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 16 +// HYBRID64-NEXT: ret i128 [[TMP0]] +// +// PURECAP32-LABEL: define {{[^@]+}}@loadi128 +// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: [[CALL:%.*]] = call i64 @__atomic_load_8(ptr addrspace(200) noundef [[I]], i32 noundef 5) +// PURECAP32-NEXT: ret i64 [[CALL]] +// +// HYBRID32-LABEL: define {{[^@]+}}@loadi128 +// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: [[CALL:%.*]] = call i64 @__atomic_load_8(ptr noundef [[I]], i32 noundef 5) +// HYBRID32-NEXT: ret i64 [[CALL]] +// +cap_size_int loadi128(cap_size_int* i) { + return __atomic_load_n(i, __ATOMIC_SEQ_CST); +// expected-warning@-1{{large atomic operation may incur significant performance penalty}} +} + +// PURECAP64-LABEL: define {{[^@]+}}@xchg_long +// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]], i64 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[L]], i64 [[VAL]] seq_cst, align 8 +// PURECAP64-NEXT: ret i64 [[TMP0]] +// +// HYBRID64-LABEL: define {{[^@]+}}@xchg_long +// HYBRID64-SAME: (ptr noundef [[L:%.*]], i64 noundef [[VAL:%.*]]) #[[ATTR0]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[L]], i64 [[VAL]] seq_cst, align 8 +// HYBRID64-NEXT: ret i64 [[TMP0]] +// +// PURECAP32-LABEL: define {{[^@]+}}@xchg_long +// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]], i32 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[L]], i32 [[VAL]] seq_cst, align 4 +// PURECAP32-NEXT: ret i32 [[TMP0]] +// +// HYBRID32-LABEL: define {{[^@]+}}@xchg_long +// HYBRID32-SAME: (ptr noundef [[L:%.*]], i32 noundef [[VAL:%.*]]) #[[ATTR0]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[L]], i32 [[VAL]] seq_cst, align 4 +// HYBRID32-NEXT: ret i32 [[TMP0]] +// +long xchg_long(long* l, long val) { + return __atomic_exchange_n(l, val, __ATOMIC_SEQ_CST); +} + +// PURECAP64-LABEL: define {{[^@]+}}@xchg_cap +// PURECAP64-SAME: (ptr 
addrspace(200) noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 16 +// PURECAP64-NEXT: ret ptr addrspace(200) [[TMP0]] +// +// HYBRID64-LABEL: define {{[^@]+}}@xchg_cap +// HYBRID64-SAME: (ptr noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) #[[ATTR0]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 16 +// HYBRID64-NEXT: ret ptr addrspace(200) [[TMP0]] +// +// PURECAP32-LABEL: define {{[^@]+}}@xchg_cap +// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 8 +// PURECAP32-NEXT: ret ptr addrspace(200) [[TMP0]] +// +// HYBRID32-LABEL: define {{[^@]+}}@xchg_cap +// HYBRID32-SAME: (ptr noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) #[[ATTR0]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 8 +// HYBRID32-NEXT: ret ptr addrspace(200) [[TMP0]] +// +__intcap xchg_cap(__intcap* i, __intcap val) { + return __atomic_exchange_n(i, val, __ATOMIC_SEQ_CST); +} + +// PURECAP64-LABEL: define {{[^@]+}}@xchg_i128 +// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]], i128 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 16, addrspace(200) +// PURECAP64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16, addrspace(200) +// PURECAP64-NEXT: store i128 [[VAL]], ptr addrspace(200) [[DOTATOMICTMP]], align 16 +// PURECAP64-NEXT: call void @__atomic_exchange(i64 noundef 16, ptr addrspace(200) noundef [[I]], ptr addrspace(200) noundef [[DOTATOMICTMP]], ptr addrspace(200) noundef [[ATOMIC_TEMP]], i32 noundef signext 5) +// PURECAP64-NEXT: [[TMP0:%.*]] = load i128, ptr addrspace(200) [[ATOMIC_TEMP]], align 16 +// PURECAP64-NEXT: ret i128 [[TMP0]] +// +// HYBRID64-LABEL: define {{[^@]+}}@xchg_i128 +// HYBRID64-SAME: (ptr noundef [[I:%.*]], i128 noundef [[VAL:%.*]]) #[[ATTR0]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 16 +// HYBRID64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16 +// HYBRID64-NEXT: store i128 [[VAL]], ptr [[DOTATOMICTMP]], align 16 +// HYBRID64-NEXT: call void @__atomic_exchange(i64 noundef 16, ptr noundef [[I]], ptr noundef [[DOTATOMICTMP]], ptr noundef [[ATOMIC_TEMP]], i32 noundef signext 5) +// HYBRID64-NEXT: [[TMP0:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 16 +// HYBRID64-NEXT: ret i128 [[TMP0]] +// +// PURECAP32-LABEL: define {{[^@]+}}@xchg_i128 +// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]], i64 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(ptr addrspace(200) noundef [[I]], i64 noundef [[VAL]], i32 noundef 5) +// PURECAP32-NEXT: ret i64 [[CALL]] +// +// HYBRID32-LABEL: define {{[^@]+}}@xchg_i128 +// HYBRID32-SAME: (ptr noundef [[I:%.*]], i64 noundef [[VAL:%.*]]) #[[ATTR0]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(ptr noundef [[I]], i64 noundef [[VAL]], i32 noundef 5) +// HYBRID32-NEXT: ret i64 [[CALL]] +// +cap_size_int 
xchg_i128(cap_size_int* i, cap_size_int val) { + return __atomic_exchange_n(i, val, __ATOMIC_SEQ_CST); + // expected-warning@-1{{large atomic operation may incur significant performance penalty}} +} + +// PURECAP64-LABEL: define {{[^@]+}}@lock_free_long +// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: ret i1 true +// +// HYBRID64-LABEL: define {{[^@]+}}@lock_free_long +// HYBRID64-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: ret i1 true +// +// PURECAP32-LABEL: define {{[^@]+}}@lock_free_long +// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: ret i1 true +// +// HYBRID32-LABEL: define {{[^@]+}}@lock_free_long +// HYBRID32-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: ret i1 true +// +_Bool lock_free_long(long* l) { + _Static_assert(__atomic_always_lock_free(sizeof(*l), 0), ""); + return __atomic_is_lock_free(sizeof(*l), l); +} + +// +// FIXME: should return true here +// PURECAP64-LABEL: define {{[^@]+}}@lock_free_cap +// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr addrspace(200) noundef [[I]]) +// PURECAP64-NEXT: ret i1 [[CALL]] +// +// HYBRID64-LABEL: define {{[^@]+}}@lock_free_cap +// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef [[I]]) +// HYBRID64-NEXT: ret i1 [[CALL]] +// +// PURECAP32-LABEL: define {{[^@]+}}@lock_free_cap +// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr addrspace(200) noundef [[I]]) +// PURECAP32-NEXT: ret i1 [[CALL]] +// +// HYBRID32-LABEL: define {{[^@]+}}@lock_free_cap +// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr noundef [[I]]) +// HYBRID32-NEXT: ret i1 [[CALL]] +// +_Bool lock_free_cap(__intcap* i) { + // TODO: _Static_assert(__atomic_always_lock_free(sizeof(*i), 0), ""); + return __atomic_is_lock_free(sizeof(*i), i); +} + +// +// FIXME: should return true here +// PURECAP64-LABEL: define {{[^@]+}}@lock_free_i128 +// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP64-NEXT: entry: +// PURECAP64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr addrspace(200) noundef [[I]]) +// PURECAP64-NEXT: ret i1 [[CALL]] +// +// HYBRID64-LABEL: define {{[^@]+}}@lock_free_i128 +// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { +// HYBRID64-NEXT: entry: +// HYBRID64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef [[I]]) +// HYBRID64-NEXT: ret i1 [[CALL]] +// +// PURECAP32-LABEL: define {{[^@]+}}@lock_free_i128 +// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { +// PURECAP32-NEXT: entry: +// PURECAP32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr addrspace(200) noundef [[I]]) +// PURECAP32-NEXT: ret i1 [[CALL]] +// +// HYBRID32-LABEL: define {{[^@]+}}@lock_free_i128 +// 
HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { +// HYBRID32-NEXT: entry: +// HYBRID32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr noundef [[I]]) +// HYBRID32-NEXT: ret i1 [[CALL]] +// +_Bool lock_free_i128(cap_size_int* i) { + // TODO: _Static_assert(__atomic_always_lock_free(sizeof(*i), 0), ""); + return __atomic_is_lock_free(sizeof(*i), i); +} diff --git a/clang/test/Sema/cheri/atomic-lock-free.c b/clang/test/Sema/cheri/atomic-lock-free.c new file mode 100644 index 000000000000..e68d322411e3 --- /dev/null +++ b/clang/test/Sema/cheri/atomic-lock-free.c @@ -0,0 +1,25 @@ +/// Check that we report true for __atomic_always_lock_free(sizeof(uintptr_t)). +/// For example libc++'s std::atomic includes a is_always_lock_free member defined as +/// static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_), 0); +/// This was incorrectly being set to false for purecap std::atomic. +/// Ideally the builtin would take a type rather than a size but unfortunately it's too late to change that. +/// See also CodeGen/cheri/atomic-lock-free.c to show that we generate the appropriate code. +// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a %s -fsyntax-only -verify=purecap +// RUN: %riscv64_cheri_cc1 -target-feature +a %s -fsyntax-only -verify=hybrid +// RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -fsyntax-only -verify=purecap +// RUN: %riscv32_cheri_cc1 -target-feature +a %s -fsyntax-only -verify=hybrid + +_Static_assert(__atomic_always_lock_free(sizeof(char), 0), ""); +_Static_assert(__atomic_always_lock_free(sizeof(short), 0), ""); +_Static_assert(__atomic_always_lock_free(sizeof(int), 0), ""); +_Static_assert(__atomic_always_lock_free(sizeof(__INTPTR_TYPE__), 0), ""); +// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(__intcap), 0)'}} +_Static_assert(__atomic_always_lock_free(sizeof(__UINTPTR_TYPE__), 0), ""); +// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(unsigned __intcap), 0)'}} +_Static_assert(__atomic_always_lock_free(sizeof(void *), 0), ""); +// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void *), 0)'}} +/// TODO: it would be nice if hybrid mode also allowed lock-free sizeof(void * __capability) +/// but this is not currently true since atomic RMW/CMPXCHG with capability +/// pointers are not supported. +_Static_assert(__atomic_always_lock_free(sizeof(void * __capability), 0), ""); // hybrid-error{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void * __capability), 0)'}} +// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void *), 0)'}} From 2c7c2b70b26d9af3395c2def6b839ca8b3c7d738 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Fri, 22 Sep 2023 09:47:42 -0700 Subject: [PATCH 15/18] [CHERI-RISC-V] Report true for __atomic_always_lock_free(sizeof(__intcap)) Now that we can expand all 2*XLen atomics inline (at least for purecap), we can report true for this builtin. This fixes problems such as std::atomic::is_lock_free reporting false in C++14 mode as well as a compilation error in compiler-rt atomic.c. 
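To make the user-visible effect concrete, here is a small C++ illustration (hypothetical usage, not part of this patch; it assumes a purecap CHERI-RISC-V toolchain with the A extension and C++17 libc++, where uintptr_t is capability-sized):

  #include <atomic>
  #include <cstdint>

  // Before this change the static_assert failed under purecap because
  // __atomic_always_lock_free(sizeof(uintptr_t), 0) evaluated to false.
  static_assert(std::atomic<std::uintptr_t>::is_always_lock_free,
                "capability-size atomics should be lock-free");

  bool probe(std::atomic<std::uintptr_t> &a) {
    // Now folds to 'return true' instead of calling __atomic_is_lock_free().
    return a.is_lock_free();
  }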
--- clang/lib/Basic/Targets/RISCV.h | 18 ++++++- clang/test/CodeGen/cheri/atomic-lock-free.c | 52 +++++++++------------ clang/test/Preprocessor/cheri-lock-free.c | 12 +++-- clang/test/Sema/cheri/atomic-lock-free.c | 5 +- 4 files changed, 48 insertions(+), 39 deletions(-) diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h index a7c2b456b104..d1e333120e16 100644 --- a/clang/lib/Basic/Targets/RISCV.h +++ b/clang/lib/Basic/Targets/RISCV.h @@ -190,8 +190,15 @@ class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo { void setMaxAtomicWidth() override { MaxAtomicPromoteWidth = 128; - if (ISAInfo->hasExtension("a")) + if (ISAInfo->hasExtension("a")) { MaxAtomicInlineWidth = 32; + // With CHERI we support capability-size integer atomic operations without + // a libcall. Currently this is limited to purecap since in hybrid mode + // RMW/CMPXCHG with a capability pointer does not work yet. + // See https://github.com/CTSRD-CHERI/llvm-project/pull/490 + if (CapabilityABI) + MaxAtomicInlineWidth = 64; + } } uint64_t getPointerRangeForCHERICapability() const override { return 32; } @@ -226,8 +233,15 @@ class LLVM_LIBRARY_VISIBILITY RISCV64TargetInfo : public RISCVTargetInfo { void setMaxAtomicWidth() override { MaxAtomicPromoteWidth = 128; - if (ISAInfo->hasExtension("a")) + if (ISAInfo->hasExtension("a")) { MaxAtomicInlineWidth = 64; + // With CHERI we support capability-size integer atomic operations without + // a libcall. Currently this is limited to purecap since in hybrid mode + // RMW/CMPXCHG with a capability pointer does not work yet. + // See https://github.com/CTSRD-CHERI/llvm-project/pull/490 + if (CapabilityABI) + MaxAtomicInlineWidth = 128; + } } uint64_t getPointerRangeForCHERICapability() const override { return 64; } diff --git a/clang/test/CodeGen/cheri/atomic-lock-free.c b/clang/test/CodeGen/cheri/atomic-lock-free.c index c7dac7b32066..8d53b89c5ead 100644 --- a/clang/test/CodeGen/cheri/atomic-lock-free.c +++ b/clang/test/CodeGen/cheri/atomic-lock-free.c @@ -1,9 +1,10 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature /// Check that we emit inline atomics rather than library calls for capability-size atomics -// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP64 -// RUN: %riscv64_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID64 -// RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP32 -// RUN: %riscv32_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID32 +// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify=purecap | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP64 +// RUN: %riscv64_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify=hybrid | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID64 +// RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify=purecap | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP32 +// RUN: %riscv32_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify=hybrid | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID32 +// 
purecap-no-diagnostics #if __CHERI_CAPABILITY_WIDTH__ == 64 typedef __INT64_TYPE__ cap_size_int; @@ -70,9 +71,7 @@ __intcap load_cap(__intcap* i) { // PURECAP64-LABEL: define {{[^@]+}}@loadi128 // PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { // PURECAP64-NEXT: entry: -// PURECAP64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16, addrspace(200) -// PURECAP64-NEXT: call void @__atomic_load(i64 noundef 16, ptr addrspace(200) noundef [[I]], ptr addrspace(200) noundef [[ATOMIC_TEMP]], i32 noundef signext 5) -// PURECAP64-NEXT: [[TMP0:%.*]] = load i128, ptr addrspace(200) [[ATOMIC_TEMP]], align 16 +// PURECAP64-NEXT: [[TMP0:%.*]] = load atomic i128, ptr addrspace(200) [[I]] seq_cst, align 16 // PURECAP64-NEXT: ret i128 [[TMP0]] // // HYBRID64-LABEL: define {{[^@]+}}@loadi128 @@ -86,8 +85,8 @@ __intcap load_cap(__intcap* i) { // PURECAP32-LABEL: define {{[^@]+}}@loadi128 // PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { // PURECAP32-NEXT: entry: -// PURECAP32-NEXT: [[CALL:%.*]] = call i64 @__atomic_load_8(ptr addrspace(200) noundef [[I]], i32 noundef 5) -// PURECAP32-NEXT: ret i64 [[CALL]] +// PURECAP32-NEXT: [[TMP0:%.*]] = load atomic i64, ptr addrspace(200) [[I]] seq_cst, align 8 +// PURECAP32-NEXT: ret i64 [[TMP0]] // // HYBRID32-LABEL: define {{[^@]+}}@loadi128 // HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { @@ -97,7 +96,7 @@ __intcap load_cap(__intcap* i) { // cap_size_int loadi128(cap_size_int* i) { return __atomic_load_n(i, __ATOMIC_SEQ_CST); -// expected-warning@-1{{large atomic operation may incur significant performance penalty}} + // hybrid-warning@-1{{large atomic operation may incur significant performance penalty}} } // PURECAP64-LABEL: define {{[^@]+}}@xchg_long @@ -159,11 +158,7 @@ __intcap xchg_cap(__intcap* i, __intcap val) { // PURECAP64-LABEL: define {{[^@]+}}@xchg_i128 // PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]], i128 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { // PURECAP64-NEXT: entry: -// PURECAP64-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 16, addrspace(200) -// PURECAP64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16, addrspace(200) -// PURECAP64-NEXT: store i128 [[VAL]], ptr addrspace(200) [[DOTATOMICTMP]], align 16 -// PURECAP64-NEXT: call void @__atomic_exchange(i64 noundef 16, ptr addrspace(200) noundef [[I]], ptr addrspace(200) noundef [[DOTATOMICTMP]], ptr addrspace(200) noundef [[ATOMIC_TEMP]], i32 noundef signext 5) -// PURECAP64-NEXT: [[TMP0:%.*]] = load i128, ptr addrspace(200) [[ATOMIC_TEMP]], align 16 +// PURECAP64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], i128 [[VAL]] seq_cst, align 16 // PURECAP64-NEXT: ret i128 [[TMP0]] // // HYBRID64-LABEL: define {{[^@]+}}@xchg_i128 @@ -179,8 +174,8 @@ __intcap xchg_cap(__intcap* i, __intcap val) { // PURECAP32-LABEL: define {{[^@]+}}@xchg_i128 // PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]], i64 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] { // PURECAP32-NEXT: entry: -// PURECAP32-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(ptr addrspace(200) noundef [[I]], i64 noundef [[VAL]], i32 noundef 5) -// PURECAP32-NEXT: ret i64 [[CALL]] +// PURECAP32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], i64 [[VAL]] seq_cst, align 8 +// PURECAP32-NEXT: ret i64 [[TMP0]] // // HYBRID32-LABEL: define {{[^@]+}}@xchg_i128 // HYBRID32-SAME: (ptr noundef [[I:%.*]], i64 noundef [[VAL:%.*]]) #[[ATTR0]] { @@ -190,7 +185,7 @@ __intcap xchg_cap(__intcap* i, __intcap val) { // cap_size_int 
xchg_i128(cap_size_int* i, cap_size_int val) { return __atomic_exchange_n(i, val, __ATOMIC_SEQ_CST); - // expected-warning@-1{{large atomic operation may incur significant performance penalty}} + // hybrid-warning@-1{{large atomic operation may incur significant performance penalty}} } // PURECAP64-LABEL: define {{[^@]+}}@lock_free_long @@ -223,8 +218,7 @@ _Bool lock_free_long(long* l) { // PURECAP64-LABEL: define {{[^@]+}}@lock_free_cap // PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { // PURECAP64-NEXT: entry: -// PURECAP64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr addrspace(200) noundef [[I]]) -// PURECAP64-NEXT: ret i1 [[CALL]] +// PURECAP64-NEXT: ret i1 true // // HYBRID64-LABEL: define {{[^@]+}}@lock_free_cap // HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { @@ -235,8 +229,7 @@ _Bool lock_free_long(long* l) { // PURECAP32-LABEL: define {{[^@]+}}@lock_free_cap // PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { // PURECAP32-NEXT: entry: -// PURECAP32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr addrspace(200) noundef [[I]]) -// PURECAP32-NEXT: ret i1 [[CALL]] +// PURECAP32-NEXT: ret i1 true // // HYBRID32-LABEL: define {{[^@]+}}@lock_free_cap // HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { @@ -245,17 +238,17 @@ _Bool lock_free_long(long* l) { // HYBRID32-NEXT: ret i1 [[CALL]] // _Bool lock_free_cap(__intcap* i) { - // TODO: _Static_assert(__atomic_always_lock_free(sizeof(*i), 0), ""); +#ifdef __CHERI_PURE_CAPABILITY__ + _Static_assert(__atomic_always_lock_free(sizeof(*i), 0), ""); +#endif return __atomic_is_lock_free(sizeof(*i), i); } // -// FIXME: should return true here // PURECAP64-LABEL: define {{[^@]+}}@lock_free_i128 // PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { // PURECAP64-NEXT: entry: -// PURECAP64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr addrspace(200) noundef [[I]]) -// PURECAP64-NEXT: ret i1 [[CALL]] +// PURECAP64-NEXT: ret i1 true // // HYBRID64-LABEL: define {{[^@]+}}@lock_free_i128 // HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { @@ -266,8 +259,7 @@ _Bool lock_free_cap(__intcap* i) { // PURECAP32-LABEL: define {{[^@]+}}@lock_free_i128 // PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] { // PURECAP32-NEXT: entry: -// PURECAP32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr addrspace(200) noundef [[I]]) -// PURECAP32-NEXT: ret i1 [[CALL]] +// PURECAP32-NEXT: ret i1 true // // HYBRID32-LABEL: define {{[^@]+}}@lock_free_i128 // HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] { @@ -276,6 +268,8 @@ _Bool lock_free_cap(__intcap* i) { // HYBRID32-NEXT: ret i1 [[CALL]] // _Bool lock_free_i128(cap_size_int* i) { - // TODO: _Static_assert(__atomic_always_lock_free(sizeof(*i), 0), ""); +#ifdef __CHERI_PURE_CAPABILITY__ + _Static_assert(__atomic_always_lock_free(sizeof(*i), 0), ""); +#endif return __atomic_is_lock_free(sizeof(*i), i); } diff --git a/clang/test/Preprocessor/cheri-lock-free.c b/clang/test/Preprocessor/cheri-lock-free.c index 17bb21e3c359..350025a6eec5 100644 --- a/clang/test/Preprocessor/cheri-lock-free.c +++ b/clang/test/Preprocessor/cheri-lock-free.c @@ -1,9 +1,9 @@ /// Check that we report pointers as being always lock-free, otherwise /// ends up using locks with -ffreestanding. 
// RUN: %riscv32_cheri_cc1 -fgnuc-version=4.2.1 -target-feature +a -E -dM %s \ -// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-32 --implicit-check-not=_LOCK_FREE +// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-32-HYBRID --implicit-check-not=_LOCK_FREE // RUN: %riscv32_cheri_purecap_cc1 -fgnuc-version=4.2.1 -target-feature +a -E -dM %s \ -// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-32 --implicit-check-not=_LOCK_FREE +// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-32-PURECAP --implicit-check-not=_LOCK_FREE // RUN: %riscv64_cheri_cc1 -fgnuc-version=4.2.1 -target-feature +a -E -dM %s \ // RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-64 --implicit-check-not=_LOCK_FREE // RUN: %riscv64_cheri_purecap_cc1 -fgnuc-version=4.2.1 -target-feature +a -E -dM %s \ @@ -15,7 +15,9 @@ // CHECK: #define __CLANG_ATOMIC_CHAR_LOCK_FREE 2 // CHECK: #define __CLANG_ATOMIC_INT_LOCK_FREE 2 // CHECK-64: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 2 -// CHECK-32: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 1 +// NB: LLONG is always lockfree for RV32 purecap since we use capability atomics. +// CHECK-32-HYBRID: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 1 +// CHECK-32-PURECAP: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 2 // CHECK: #define __CLANG_ATOMIC_LONG_LOCK_FREE 2 // CHECK: #define __CLANG_ATOMIC_POINTER_LOCK_FREE 2 // CHECK: #define __CLANG_ATOMIC_SHORT_LOCK_FREE 2 @@ -26,7 +28,9 @@ // CHECK: #define __GCC_ATOMIC_CHAR_LOCK_FREE 2 // CHECK: #define __GCC_ATOMIC_INT_LOCK_FREE 2 // CHECK-64: #define __GCC_ATOMIC_LLONG_LOCK_FREE 2 -// CHECK-32: #define __GCC_ATOMIC_LLONG_LOCK_FREE 1 +// NB: LLONG is always lockfree for RV32 purecap since we use capability atomics. +// CHECK-32-HYBRID: #define __GCC_ATOMIC_LLONG_LOCK_FREE 1 +// CHECK-32-PURECAP: #define __GCC_ATOMIC_LLONG_LOCK_FREE 2 // CHECK: #define __GCC_ATOMIC_LONG_LOCK_FREE 2 // CHECK: #define __GCC_ATOMIC_POINTER_LOCK_FREE 2 // CHECK: #define __GCC_ATOMIC_SHORT_LOCK_FREE 2 diff --git a/clang/test/Sema/cheri/atomic-lock-free.c b/clang/test/Sema/cheri/atomic-lock-free.c index e68d322411e3..746e5676f56b 100644 --- a/clang/test/Sema/cheri/atomic-lock-free.c +++ b/clang/test/Sema/cheri/atomic-lock-free.c @@ -8,18 +8,15 @@ // RUN: %riscv64_cheri_cc1 -target-feature +a %s -fsyntax-only -verify=hybrid // RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -fsyntax-only -verify=purecap // RUN: %riscv32_cheri_cc1 -target-feature +a %s -fsyntax-only -verify=hybrid +// purecap-no-diagnostics _Static_assert(__atomic_always_lock_free(sizeof(char), 0), ""); _Static_assert(__atomic_always_lock_free(sizeof(short), 0), ""); _Static_assert(__atomic_always_lock_free(sizeof(int), 0), ""); _Static_assert(__atomic_always_lock_free(sizeof(__INTPTR_TYPE__), 0), ""); -// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(__intcap), 0)'}} _Static_assert(__atomic_always_lock_free(sizeof(__UINTPTR_TYPE__), 0), ""); -// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(unsigned __intcap), 0)'}} _Static_assert(__atomic_always_lock_free(sizeof(void *), 0), ""); -// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void *), 0)'}} /// TODO: it would be nice if hybrid mode also allowed lock-free sizeof(void * __capability) /// but this is not currently true since atomic RMW/CMPXCHG with capability /// pointers are not supported. 
_Static_assert(__atomic_always_lock_free(sizeof(void * __capability), 0), ""); // hybrid-error{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void * __capability), 0)'}} -// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void *), 0)'}} From 1dcc97c93665beb9e1994fca2199ff5e08978c00 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Thu, 15 Feb 2024 16:37:49 -0800 Subject: [PATCH 16/18] Add a regression test for cmpxchg exact crash We are assuming there can only ever be one memory operand on the pseudo instructions but there could be more than one if the branch-folder pass merges identical blocks. Found while building CheriBSD. --- .../Inputs/cmpxchg-exact-branch-folder.ll | 86 +++++++++++ .../MIPS/cmpxchg-exact-branch-folder.ll | 135 ++++++++++++++++++ .../RISCV32/cmpxchg-exact-branch-folder.ll | 125 ++++++++++++++++ .../RISCV64/cmpxchg-exact-branch-folder.ll | 126 ++++++++++++++++ 4 files changed, 472 insertions(+) create mode 100644 llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-exact-branch-folder.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-exact-branch-folder.ll create mode 100644 llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-exact-branch-folder.ll diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll new file mode 100644 index 000000000000..57ee81ce26ae --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll @@ -0,0 +1,86 @@ +; CHERI-GENERIC-UTC: llc +; CHERI-GENERIC-UTC: mir +@IF-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ -mattr=+a < %s --stop-after=branch-folder | FileCheck %s --check-prefixes=MIR +@IFNOT-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ < %s --stop-after=branch-folder --enable-tail-merge | FileCheck %s --check-prefixes=MIR +@IF-RISCV@; RUN: not --crash llc @PURECAP_HARDFLOAT_ARGS@ -mattr=+a < %s +@IFNOT-RISCV@; RUN: not --crash llc @PURECAP_HARDFLOAT_ARGS@ --enable-tail-merge < %s +; REQUIRES: asserts + +; The branch-folder MIR pass will merge the two blocks inside these functions but +; since the base pointer is distinct it will have two MachineMemOperands. +; The cmpxchg exact logic stored the exact flag in the MachineMemOperand and +; previously assumed there would only ever be one operand, so this test ensures +; we can handle the merged logic. 
+ +define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 %newval, i32 %cmpval) { +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg ptr addrspace(200) %ld1, i32 %cmpval, i32 %newval release monotonic, align 4 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, i32 %cmpval, i32 %newval release monotonic, align 4 + br label %end + +end: + ret i32 0 +} + +define dso_local signext i32 @merge_ptr_addr(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} + +define dso_local signext i32 @merge_ptr_exact(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg exact ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg exact ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} + +; FIXME: these two branches should not be merged! 
+define dso_local signext i32 @merge_ptr_mismatch_exact_flag(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg exact ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-exact-branch-folder.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-exact-branch-folder.ll new file mode 100644 index 000000000000..ec1750b4e589 --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-exact-branch-folder.ll @@ -0,0 +1,135 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll +; CHERI-GENERIC-UTC: llc +; CHERI-GENERIC-UTC: mir +; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s --stop-after=branch-folder --enable-tail-merge | FileCheck %s --check-prefixes=MIR +; RUN: not --crash llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap --enable-tail-merge < %s +; REQUIRES: asserts + +; The branch-folder MIR pass will merge the two blocks inside these functions but +; since the base pointer is distinct it will have two MachineMemOperands. +; The cmpxchg exact logic stored the exact flag in the MachineMemOperand and +; previously assumed there would only ever be one operand, so this test ensures +; we can handle the merged logic. 
+ +define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 %newval, i32 %cmpval) { + ; MIR-LABEL: name: merge_i32 + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c3, $a0_64, $a1_64, $a2_64 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $v0 = SLL renamable $a2, 0, implicit killed $a2_64 + ; MIR-NEXT: renamable $at = SLL renamable $a0, 0, implicit killed $a0_64 + ; MIR-NEXT: renamable $at = ANDi killed renamable $at, 1 + ; MIR-NEXT: renamable $v1 = SLL renamable $a1, 0, implicit killed $a1_64 + ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: SYNC 0 + ; MIR-NEXT: dead early-clobber renamable $at = CAP_ATOMIC_CMP_SWAP_I32_POSTRA killed renamable $c1, killed renamable $v0, killed renamable $v1, implicit-def dead early-clobber renamable $a0 :: (load store monotonic monotonic (s32) on %ir.ld2, addrspace 200), (load store monotonic monotonic (s32) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $v0_64 = DADDiu $zero_64, 0 + ; MIR-NEXT: CapRetPseudo implicit $v0_64 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg ptr addrspace(200) %ld1, i32 %cmpval, i32 %newval release monotonic, align 4 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, i32 %cmpval, i32 %newval release monotonic, align 4 + br label %end + +end: + ret i32 0 +} + +define dso_local signext i32 @merge_ptr_addr(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_addr + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c3, $c4, $c5, $a0_64 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $at = SLL renamable $a0, 0, implicit killed $a0_64 + ; MIR-NEXT: renamable $at = ANDi killed renamable $at, 1 + ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: SYNC 0 + ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 :: (load store monotonic monotonic (s128) on %ir.ld2, addrspace 200), (load store monotonic monotonic (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $v0_64 = DADDiu $zero_64, 0 + ; MIR-NEXT: CapRetPseudo implicit killed $v0_64 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} + +define dso_local signext i32 @merge_ptr_exact(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_exact + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c3, $c4, $c5, $a0_64 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $at = SLL renamable $a0, 0, implicit killed $a0_64 + ; MIR-NEXT: renamable $at = ANDi killed renamable $at, 1 + ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: SYNC 
0 + ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 :: (load store monotonic monotonic exact (s128) on %ir.ld2, addrspace 200), (load store monotonic monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $v0_64 = DADDiu $zero_64, 0 + ; MIR-NEXT: CapRetPseudo implicit killed $v0_64 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg exact ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg exact ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} + +; FIXME: these two branches should not be merged! +define dso_local signext i32 @merge_ptr_mismatch_exact_flag(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_mismatch_exact_flag + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c3, $c4, $c5, $a0_64 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $at = SLL renamable $a0, 0, implicit killed $a0_64 + ; MIR-NEXT: renamable $at = ANDi killed renamable $at, 1 + ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: SYNC 0 + ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 :: (load store monotonic monotonic (s128) on %ir.ld2, addrspace 200), (load store monotonic monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $v0_64 = DADDiu $zero_64, 0 + ; MIR-NEXT: CapRetPseudo implicit killed $v0_64 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg exact ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; MIR: {{.*}} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-exact-branch-folder.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-exact-branch-folder.ll new file mode 100644 index 000000000000..cc3940f55040 --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-exact-branch-folder.ll @@ -0,0 +1,125 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll +; CHERI-GENERIC-UTC: llc +; CHERI-GENERIC-UTC: mir +; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=+a < %s --stop-after=branch-folder | FileCheck %s --check-prefixes=MIR +; RUN: not --crash llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=+a < %s +; REQUIRES: asserts + +; The branch-folder MIR pass will merge the two blocks inside these functions but +; since the base pointer is distinct it will have two MachineMemOperands. +; The cmpxchg exact logic stored the exact flag in the MachineMemOperand and +; previously assumed there would only ever be one operand, so this test ensures +; we can handle the merged logic. + +define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 %newval, i32 %cmpval) { + ; MIR-LABEL: name: merge_i32 + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c11, $x10, $x12, $x13 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: renamable $c10 = CLC_64 killed renamable $c11, 0 :: (load (s64) from %ir.ptr, align 16, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $x11, dead early-clobber renamable $x14 = PseudoCheriCmpXchg32 killed renamable $c10, killed renamable $x13, killed renamable $x12, 5 :: (load store release monotonic (s32) on %ir.ld2, addrspace 200), (load store release monotonic (s32) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $x10 = COPY $x0 + ; MIR-NEXT: PseudoCRET implicit $x10 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg ptr addrspace(200) %ld1, i32 %cmpval, i32 %newval release monotonic, align 4 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, i32 %cmpval, i32 %newval release monotonic, align 4 + br label %end + +end: + ret i32 0 +} + +define dso_local signext i32 @merge_ptr_addr(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_addr + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: renamable $c10 = CLC_64 killed renamable $c11, 0 :: (load (s64) from %ir.ptr, align 16, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s64) on %ir.ld2, addrspace 200), (load store release monotonic (s64) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $x10 = COPY $x0 + ; MIR-NEXT: PseudoCRET implicit $x10 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr 
addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} + +define dso_local signext i32 @merge_ptr_exact(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_exact + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: renamable $c10 = CLC_64 killed renamable $c11, 0 :: (load (s64) from %ir.ptr, align 16, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic exact (s64) on %ir.ld2, addrspace 200), (load store release monotonic exact (s64) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $x10 = COPY $x0 + ; MIR-NEXT: PseudoCRET implicit $x10 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg exact ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg exact ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} + +; FIXME: these two branches should not be merged! +define dso_local signext i32 @merge_ptr_mismatch_exact_flag(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_mismatch_exact_flag + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: renamable $c10 = CLC_64 killed renamable $c11, 0 :: (load (s64) from %ir.ptr, align 16, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s64) on %ir.ld2, addrspace 200), (load store release monotonic exact (s64) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $x10 = COPY $x0 + ; MIR-NEXT: PseudoCRET implicit $x10 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg exact ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; MIR: {{.*}} diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-exact-branch-folder.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-exact-branch-folder.ll new file mode 100644 index 000000000000..d6bf459f2eb8 --- /dev/null +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-exact-branch-folder.ll @@ -0,0 +1,126 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --force-update +; DO NOT EDIT -- This file was generated from test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll +; CHERI-GENERIC-UTC: llc +; CHERI-GENERIC-UTC: mir +; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=+a < %s --stop-after=branch-folder | FileCheck %s --check-prefixes=MIR +; RUN: not --crash llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=+a < %s +; REQUIRES: asserts + +; The branch-folder MIR pass will merge the two blocks inside these functions but +; since the base pointer is distinct it will have two MachineMemOperands. +; The cmpxchg exact logic stored the exact flag in the MachineMemOperand and +; previously assumed there would only ever be one operand, so this test ensures +; we can handle the merged logic. + +define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 %newval, i32 %cmpval) { + ; MIR-LABEL: name: merge_i32 + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c11, $x10, $x12, $x13 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: renamable $c10 = CLC_128 killed renamable $c11, 0 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: renamable $x11 = ADDIW killed renamable $x13, 0 + ; MIR-NEXT: dead early-clobber renamable $x13, dead early-clobber renamable $x14 = PseudoCheriCmpXchg32 killed renamable $c10, killed renamable $x11, killed renamable $x12, 5 :: (load store release monotonic (s32) on %ir.ld2, addrspace 200), (load store release monotonic (s32) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $x10 = COPY $x0 + ; MIR-NEXT: PseudoCRET implicit $x10 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg ptr addrspace(200) %ld1, i32 %cmpval, i32 %newval release monotonic, align 4 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, i32 %cmpval, i32 %newval release monotonic, align 4 + br label %end + +end: + ret i32 0 +} + +define dso_local signext i32 @merge_ptr_addr(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_addr + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: renamable $c10 = CLC_128 killed renamable $c11, 0 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s128) on %ir.ld2, addrspace 200), (load store release monotonic (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $x10 = COPY $x0 + ; MIR-NEXT: PseudoCRET implicit $x10 +entry: + br i1 %cond1, label 
%if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} + +define dso_local signext i32 @merge_ptr_exact(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_exact + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: renamable $c10 = CLC_128 killed renamable $c11, 0 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic exact (s128) on %ir.ld2, addrspace 200), (load store release monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $x10 = COPY $x0 + ; MIR-NEXT: PseudoCRET implicit $x10 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg exact ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg exact ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} + +; FIXME: these two branches should not be merged! +define dso_local signext i32 @merge_ptr_mismatch_exact_flag(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { + ; MIR-LABEL: name: merge_ptr_mismatch_exact_flag + ; MIR: bb.0.entry: + ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: renamable $c10 = CLC_128 killed renamable $c11, 0 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s128) on %ir.ld2, addrspace 200), (load store release monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: $x10 = COPY $x0 + ; MIR-NEXT: PseudoCRET implicit $x10 +entry: + br i1 %cond1, label %if.then, label %if.else + +if.then: + %ld1 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic1 = cmpxchg exact ptr addrspace(200) %ld1, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +if.else: + %ld2 = load ptr addrspace(200), ptr addrspace(200) %ptr, align 16 + %atomic2 = cmpxchg ptr addrspace(200) %ld2, ptr addrspace(200) %cmpval, ptr addrspace(200) %newval release monotonic, align 16 + br label %end + +end: + ret i32 0 +} +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; MIR: {{.*}} From b30abe04c5d046003576a22a334f3a44eb88a39b Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Sat, 17 Feb 2024 15:59:24 -0800 Subject: [PATCH 17/18] [CHERI-Generic] Make it easier to add new substitutions --- .../CodeGen/CHERI-Generic/regenerate-all.py | 27 ++++++++++--------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/llvm/test/CodeGen/CHERI-Generic/regenerate-all.py b/llvm/test/CodeGen/CHERI-Generic/regenerate-all.py index 7c7c738ca16f..aff8bab21edc 100755 --- a/llvm/test/CodeGen/CHERI-Generic/regenerate-all.py +++ b/llvm/test/CodeGen/CHERI-Generic/regenerate-all.py @@ -15,17 +15,23 @@ CHERI_GENERIC_UTC_CMD = re.compile((r'.*' + CHERI_GENERIC_UTC_KEY + r'\s*(?P.*)\s*$').encode("utf-8")) +def _list2bytes(l: "list[str]") -> bytes: + return " ".join(l).encode("utf-8") + class ArchSpecificValues(object): def __init__(self, architecture: str, *, cap_range, cap_width, common_args: list, hybrid_sf_args: list, hybrid_hf_args: list, purecap_sf_args: list, purecap_hf_args: list, datalayout: bytes, base_architecture: str = None): - self.hybrid_datalayout = datalayout - self.purecap_datalayout = datalayout + b"-A200-P200-G200" - self.hybrid_softfloat_args = (" ".join(common_args + hybrid_sf_args)).encode("utf-8") - self.hybrid_hardfloat_args = (" ".join(common_args + hybrid_hf_args)).encode("utf-8") - self.purecap_softfloat_args = (" ".join(common_args + purecap_sf_args)).encode("utf-8") - self.purecap_hardfloat_args = (" ".join(common_args + purecap_hf_args)).encode("utf-8") + + self.replacements: "dict[bytes, bytes]" = { + b"@HYBRID_DATALAYOUT@": datalayout, + b"@PURECAP_DATALAYOUT@": datalayout + b"-A200-P200-G200", + b"@HYBRID_SOFTFLOAT_ARGS@": _list2bytes(common_args + hybrid_sf_args), + b"@HYBRID_HARDFLOAT_ARGS@": _list2bytes(common_args + hybrid_hf_args), + b"@PURECAP_SOFTFLOAT_ARGS@": _list2bytes(common_args + purecap_sf_args), + b"@PURECAP_HARDFLOAT_ARGS@": _list2bytes(common_args + purecap_hf_args), + } self.cap_width = cap_width self.cap_range = cap_range self.name = architecture @@ -200,12 +206,9 @@ def update_one_test(test_name: str, input_file: typing.BinaryIO, int(math.log2(arch_def.cap_range / 8))).encode("utf-8")) # Opt tests require a datalayout since the lit substitutions don't # include it in their commandline - converted_line = converted_line.replace(b"@PURECAP_DATALAYOUT@", arch_def.purecap_datalayout) - converted_line = converted_line.replace(b"@HYBRID_DATALAYOUT@", arch_def.hybrid_datalayout) - converted_line = converted_line.replace(b"@HYBRID_SOFTFLOAT_ARGS@", arch_def.hybrid_softfloat_args) - converted_line = converted_line.replace(b"@HYBRID_HARDFLOAT_ARGS@", arch_def.hybrid_hardfloat_args) - converted_line = converted_line.replace(b"@PURECAP_SOFTFLOAT_ARGS@", arch_def.purecap_softfloat_args) - converted_line = converted_line.replace(b"@PURECAP_HARDFLOAT_ARGS@", arch_def.purecap_hardfloat_args) + for key, replacement in arch_def.replacements.items(): + converted_line = converted_line.replace(key, replacement) + if args.verbose and converted_line != line: print("Adjusted line:") print(" Before:", line) From 0eb4a79fa201fb06a3bcad354760eec8629749e9 Mon Sep 17 00:00:00 2001 From: Alex Richardson Date: Sat, 17 Feb 2024 16:28:27 -0800 Subject: [PATCH 18/18] [CHERI] Use separate Pseudo instructions for cmpxchg nodes Using separate pseudos for exact and inexact comparisons ensures that our lowering does not depend on the MachineMemOperand (after SDAG), since passes could drop it (which would force the most conservative lowering). This adds a bit of boilerplate but it's not as bad as I expected and is less fragile than the previous approach.
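The core of the new scheme, condensed into a short C++ sketch (simplified from the MipsExpandPseudo.cpp hunk below, using the opcode names this patch introduces; not a verbatim excerpt):

  // The exact-vs-address decision is read from the opcode itself, so it
  // survives even if a pass drops or merges the MachineMemOperands.
  bool UseExactEquals = false;
  switch (I->getOpcode()) {
  case Mips::CAP_ATOMIC_CMP_SWAP_CAP_EXACT_POSTRA:
    UseExactEquals = true; // compare all capability bits
    break;
  case Mips::CAP_ATOMIC_CMP_SWAP_CAP_ADDR_POSTRA:
    break; // compare only the address
  }
  unsigned CapCmp = UseExactEquals ? Mips::CEXEQ : Mips::CEQ; // CExEq vs. CEq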
--- llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 6 + .../include/llvm/Target/TargetSelectionDAG.td | 18 ++- llvm/lib/Target/Mips/MipsExpandPseudo.cpp | 12 +- llvm/lib/Target/Mips/MipsISelLowering.cpp | 11 +- llvm/lib/Target/Mips/MipsInstrCheri.td | 14 +- .../RISCV/RISCVExpandAtomicPseudoInsts.cpp | 11 +- llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td | 12 +- .../Inputs/cmpxchg-exact-branch-folder.ll | 9 +- .../MIPS/cmpxchg-exact-branch-folder.ll | 133 +++++++++++++++++- .../RISCV32/cmpxchg-exact-branch-folder.ll | 106 +++++++++++++- .../RISCV64/cmpxchg-exact-branch-folder.ll | 107 +++++++++++++- 11 files changed, 391 insertions(+), 48 deletions(-) diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h index 389fbce72ad0..094bbe25625b 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h @@ -1453,6 +1453,12 @@ class AtomicSDNode : public MemSDNode { return MMO->getFailureOrdering(); } + /// Return true if this cmpxchg must compare all capability bits rather + /// than just the address. + bool isExactCmpXchg() const { + assert(getMemoryVT().isFatPointer()); + return MMO->isExactCompare(); + } + // Methods to support isa and dyn_cast static bool classof(const SDNode *N) { return N->getOpcode() == ISD::ATOMIC_CMP_SWAP || diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td index e358487246dc..65fef616905c 100644 --- a/llvm/include/llvm/Target/TargetSelectionDAG.td +++ b/llvm/include/llvm/Target/TargetSelectionDAG.td @@ -1888,10 +1888,21 @@ multiclass binary_atomic_op_cap { defm NAME : binary_atomic_op_ord; } -multiclass ternary_atomic_op_cap { +multiclass ternary_atomic_op_cap_inexact { def "" : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (atomic_op node:$ptr, node:$cmp, node:$val), [{ - return cast<AtomicSDNode>(N)->getMemoryVT().isCapability(); + auto AN = cast<AtomicSDNode>(N); + return AN->getMemoryVT().isCapability() && !AN->isExactCmpXchg(); + }]>; + + defm NAME : ternary_atomic_op_ord; +} + +multiclass ternary_atomic_op_cap_exact { + def "" : PatFrag<(ops node:$ptr, node:$cmp, node:$val), + (atomic_op node:$ptr, node:$cmp, node:$val), [{ + auto AN = cast<AtomicSDNode>(N); + return AN->getMemoryVT().isCapability() && AN->isExactCmpXchg(); }]>; defm NAME : ternary_atomic_op_ord; @@ -1910,7 +1921,8 @@ defm atomic_load_max_cap : binary_atomic_op_cap; defm atomic_load_umin_cap : binary_atomic_op_cap; defm atomic_load_umax_cap : binary_atomic_op_cap; defm atomic_store_cap : binary_atomic_op_cap; -defm atomic_cmp_swap_cap : ternary_atomic_op_cap; +defm atomic_cmp_swap_cap_addr : ternary_atomic_op_cap_inexact; +defm atomic_cmp_swap_cap_exact : ternary_atomic_op_cap_exact; def atomic_load_cap : PatFrag<(ops node:$ptr), diff --git a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp index 5c2168d9843a..9af181997df8 100644 --- a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp +++ b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp @@ -212,6 +212,7 @@ bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB, unsigned Size = -1; bool IsCapCmpXchg = false; + bool UseExactEquals = false; switch(I->getOpcode()) { case Mips::ATOMIC_CMP_SWAP_I32_POSTRA: Size = 4; break; case Mips::ATOMIC_CMP_SWAP_I64_POSTRA: Size = 8; break; @@ -219,7 +220,10 @@ bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB, case
Mips::CAP_ATOMIC_CMP_SWAP_I16_POSTRA: Size = 2; break; case Mips::CAP_ATOMIC_CMP_SWAP_I32_POSTRA: Size = 4; break; case Mips::CAP_ATOMIC_CMP_SWAP_I64_POSTRA: Size = 8; break; - case Mips::CAP_ATOMIC_CMP_SWAP_CAP_POSTRA: + case Mips::CAP_ATOMIC_CMP_SWAP_CAP_EXACT_POSTRA: + UseExactEquals = true; + LLVM_FALLTHROUGH; + case Mips::CAP_ATOMIC_CMP_SWAP_CAP_ADDR_POSTRA: Size = CAP_ATOMIC_SIZE; IsCapCmpXchg = true; break; @@ -327,9 +331,6 @@ bool MipsExpandPseudo::expandAtomicCmpSwap(MachineBasicBlock &BB, if (!IsCapOp) LLOp.addImm(0); if (IsCapCmpXchg) { - assert(I->hasOneMemOperand()); - bool UseExactEquals = - STI->useCheriExactEquals() || I->memoperands()[0]->isExactCompare(); unsigned CapCmp = UseExactEquals ? Mips::CEXEQ : Mips::CEQ; // load, compare, and exit if not equal // cllc dest, ptr @@ -1098,7 +1099,8 @@ bool MipsExpandPseudo::expandMI(MachineBasicBlock &MBB, case Mips::CAP_ATOMIC_CMP_SWAP_I16_POSTRA: case Mips::CAP_ATOMIC_CMP_SWAP_I32_POSTRA: case Mips::CAP_ATOMIC_CMP_SWAP_I64_POSTRA: - case Mips::CAP_ATOMIC_CMP_SWAP_CAP_POSTRA: + case Mips::CAP_ATOMIC_CMP_SWAP_CAP_ADDR_POSTRA: + case Mips::CAP_ATOMIC_CMP_SWAP_CAP_EXACT_POSTRA: return expandAtomicCmpSwap(MBB, MBBI, NMBB, /*IsCapOp=*/true); case Mips::PseudoPccRelativeAddressPostRA: return expandPccRelativeAddr(MBB, MBBI, NMBB); diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index d557e5ee9acb..d9dc8dfaaed2 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -1837,7 +1837,8 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, case Mips::CAP_ATOMIC_CMP_SWAP_I16: case Mips::CAP_ATOMIC_CMP_SWAP_I32: case Mips::CAP_ATOMIC_CMP_SWAP_I64: - case Mips::CAP_ATOMIC_CMP_SWAP_CAP: + case Mips::CAP_ATOMIC_CMP_SWAP_CAP_ADDR: + case Mips::CAP_ATOMIC_CMP_SWAP_CAP_EXACT: return emitAtomicCmpSwap(MI, BB); @@ -2445,8 +2446,12 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI, AtomicOp = Mips::CAP_ATOMIC_CMP_SWAP_I64_POSTRA; ScratchTy = MVT::i64; break; - case Mips::CAP_ATOMIC_CMP_SWAP_CAP: - AtomicOp = Mips::CAP_ATOMIC_CMP_SWAP_CAP_POSTRA; + case Mips::CAP_ATOMIC_CMP_SWAP_CAP_ADDR: + AtomicOp = Mips::CAP_ATOMIC_CMP_SWAP_CAP_ADDR_POSTRA; + ScratchTy = MVT::i64; + break; + case Mips::CAP_ATOMIC_CMP_SWAP_CAP_EXACT: + AtomicOp = Mips::CAP_ATOMIC_CMP_SWAP_CAP_EXACT_POSTRA; ScratchTy = MVT::i64; break; default: diff --git a/llvm/lib/Target/Mips/MipsInstrCheri.td b/llvm/lib/Target/Mips/MipsInstrCheri.td index 7680b0c2f13c..5f9a863ee67a 100644 --- a/llvm/lib/Target/Mips/MipsInstrCheri.td +++ b/llvm/lib/Target/Mips/MipsInstrCheri.td @@ -759,8 +759,9 @@ let usesCustomInserter = 1 in { // Capability atomics: // FIXME: this seems wrong it should be CheriGPROrCNULL - def CAP_ATOMIC_SWAP_CAP : CapAtomic2Ops; - def CAP_ATOMIC_CMP_SWAP_CAP : CapAtomicCmpSwap; + def CAP_ATOMIC_SWAP_CAP : CapAtomic2Ops; + def CAP_ATOMIC_CMP_SWAP_CAP_ADDR : CapAtomicCmpSwap; + def CAP_ATOMIC_CMP_SWAP_CAP_EXACT : CapAtomicCmpSwap; // TODO: implement these: // def ATOMIC_LOAD_ADD_CAP : Atomic2Ops; @@ -812,8 +813,9 @@ def CAP_ATOMIC_CMP_SWAP_I64_POSTRA : CapAtomicCmpSwapPostRA; // Capability postra atomics: // TODO: do we want add/sub/or/xor/nand/and for capabilities? 
// I guess add/sub makes sense but the others don't -def CAP_ATOMIC_SWAP_CAP_POSTRA : CapAtomic2OpsPostRA; -def CAP_ATOMIC_CMP_SWAP_CAP_POSTRA : CapAtomicCmpSwapPostRA; +def CAP_ATOMIC_SWAP_CAP_POSTRA : CapAtomic2OpsPostRA; +def CAP_ATOMIC_CMP_SWAP_CAP_ADDR_POSTRA : CapAtomicCmpSwapPostRA; +def CAP_ATOMIC_CMP_SWAP_CAP_EXACT_POSTRA : CapAtomicCmpSwapPostRA; // TODO: // def CAP_ATOMIC_LOAD_ADD_CAP_POSTRA : CapAtomic2OpsPostRA; // def CAP_ATOMIC_LOAD_SUB_CAP_POSTRA : CapAtomic2OpsPostRA; @@ -849,8 +851,8 @@ def : MipsPat<(atomic_store_cap GPR64Opnd:$a, CheriOpnd:$v), (STORECAP $v, GPR64Opnd:$a, (i64 0), DDC)>; def : MipsPat<(atomic_swap_cap GPR64Opnd:$a, CheriOpnd:$swap), (CAP_ATOMIC_SWAP_CAP (CFromPtr DDC, GPR64Opnd:$a), CheriOpnd:$swap)>; -def : MipsPat<(atomic_cmp_swap_cap GPR64Opnd:$a, CheriOpnd:$cmp, CheriOpnd:$swap), - (CAP_ATOMIC_CMP_SWAP_CAP (CFromPtr DDC, GPR64Opnd:$a), CheriOpnd:$cmp, CheriOpnd:$swap)>; +def : MipsPat<(atomic_cmp_swap_cap_addr GPR64Opnd:$a, CheriOpnd:$cmp, CheriOpnd:$swap), + (CAP_ATOMIC_CMP_SWAP_CAP_ADDR (CFromPtr DDC, GPR64Opnd:$a), CheriOpnd:$cmp, CheriOpnd:$swap)>; } //////////////////////////////////////////////////////////////////////////////// // Helpers for capability-using calls and returns diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp index d19dc6cd6916..5f6e4ba3d884 100644 --- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp @@ -160,7 +160,8 @@ bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB, case RISCV::PseudoAtomicLoadUMinCap: return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, false, CLenVT, false, NextMBBI); - case RISCV::PseudoCmpXchgCap: + case RISCV::PseudoCmpXchgCapAddr: + case RISCV::PseudoCmpXchgCapExact: return expandAtomicCmpXchg(MBB, MBBI, false, CLenVT, false, NextMBBI); case RISCV::PseudoCheriAtomicSwap8: return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, false, MVT::i8, @@ -272,7 +273,8 @@ bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB, case RISCV::PseudoCheriAtomicLoadUMinCap: return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, false, CLenVT, true, NextMBBI); - case RISCV::PseudoCheriCmpXchgCap: + case RISCV::PseudoCheriCmpXchgCapAddr: + case RISCV::PseudoCheriCmpXchgCapExact: return expandAtomicCmpXchg(MBB, MBBI, false, CLenVT, true, NextMBBI); } @@ -1020,8 +1022,9 @@ bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg( BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(PtrIsCap, Ordering, VT)), DestReg) .addReg(AddrReg); - assert(MI.hasOneMemOperand()); - if (VT.isFatPointer() && MI.memoperands()[0]->isExactCompare()) { + bool ExactCapCompare = MI.getOpcode() == RISCV::PseudoCmpXchgCapExact || + MI.getOpcode() == RISCV::PseudoCheriCmpXchgCapExact; + if (VT.isFatPointer() && ExactCapCompare) { BuildMI(LoopHeadMBB, DL, TII->get(RISCV::CSEQX), ScratchReg) .addReg(DestReg, 0) .addReg(CmpValReg, 0); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td index 9f3948ac98e9..6199e12a3c63 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCheri.td @@ -1594,7 +1594,8 @@ def PseudoAtomicLoadMinCap : PseudoAMO { let Size = 24; } def PseudoAtomicLoadUMaxCap : PseudoAMO { let Size = 24; } def PseudoAtomicLoadUMinCap : PseudoAMO { let Size = 24; } def PseudoAtomicLoadNandCap : PseudoAMO { let Size = 24; } -def PseudoCmpXchgCap : PseudoCmpXchg { let Size = 16; } 
+def PseudoCmpXchgCapAddr : PseudoCmpXchg { let Size = 16; } +def PseudoCmpXchgCapExact : PseudoCmpXchg { let Size = 16; } } // Predicates = [HasCheri, HasStdExtA]f let Predicates = [HasCheri, HasStdExtA, NotCapMode] in { @@ -1608,7 +1609,8 @@ defm : PseudoAMOPat<"atomic_load_min_cap", PseudoAtomicLoadMinCap, GPCR>; defm : PseudoAMOPat<"atomic_load_umax_cap", PseudoAtomicLoadUMaxCap, GPCR>; defm : PseudoAMOPat<"atomic_load_umin_cap", PseudoAtomicLoadUMinCap, GPCR>; defm : PseudoAMOPat<"atomic_load_nand_cap", PseudoAtomicLoadNandCap, GPCR>; -defm : PseudoCmpXchgPat<"atomic_cmp_swap_cap", PseudoCmpXchgCap, GPCR>; +defm : PseudoCmpXchgPat<"atomic_cmp_swap_cap_addr", PseudoCmpXchgCapAddr, GPCR>; +defm : PseudoCmpXchgPat<"atomic_cmp_swap_cap_exact", PseudoCmpXchgCapExact, GPCR>; } // Predicates = [HasCheri, HasStdExtA, NotCapMode] /// Capability Mode Instructions @@ -1751,7 +1753,8 @@ def PseudoCheriAtomicLoadMinCap : PseudoCheriAMO { let Size = 24; } def PseudoCheriAtomicLoadUMaxCap : PseudoCheriAMO { let Size = 24; } def PseudoCheriAtomicLoadUMinCap : PseudoCheriAMO { let Size = 24; } def PseudoCheriAtomicLoadNandCap : PseudoCheriAMO { let Size = 24; } -def PseudoCheriCmpXchgCap : PseudoCheriCmpXchg { let Size = 16; } +def PseudoCheriCmpXchgCapAddr : PseudoCheriCmpXchg { let Size = 16; } +def PseudoCheriCmpXchgCapExact : PseudoCheriCmpXchg { let Size = 16; } } // Predicates = [HasCheri, HasStdExtA] let Predicates = [HasCheri, HasStdExtA, IsRV64] in { @@ -1950,7 +1953,8 @@ defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_8", PseudoCheriCmpXchg8>; defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_16", PseudoCheriCmpXchg16>; defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_32", PseudoCheriCmpXchg32>; -defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_cap", PseudoCheriCmpXchgCap, GPCR>; +defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_cap_addr", PseudoCheriCmpXchgCapAddr, GPCR>; +defm : PseudoCheriCmpXchgPat<"atomic_cmp_swap_cap_exact", PseudoCheriCmpXchgCapExact, GPCR>; } // Predicates = [HasCheri, HasStdExtA, IsCapMode] diff --git a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll index 57ee81ce26ae..ca08047500ba 100644 --- a/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll +++ b/llvm/test/CodeGen/CHERI-Generic/Inputs/cmpxchg-exact-branch-folder.ll @@ -2,15 +2,17 @@ ; CHERI-GENERIC-UTC: mir @IF-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ -mattr=+a < %s --stop-after=branch-folder | FileCheck %s --check-prefixes=MIR @IFNOT-RISCV@; RUN: llc @PURECAP_HARDFLOAT_ARGS@ < %s --stop-after=branch-folder --enable-tail-merge | FileCheck %s --check-prefixes=MIR -@IF-RISCV@; RUN: not --crash llc @PURECAP_HARDFLOAT_ARGS@ -mattr=+a < %s -@IFNOT-RISCV@; RUN: not --crash llc @PURECAP_HARDFLOAT_ARGS@ --enable-tail-merge < %s +; Note: cat %s is needed so that update_mir_test_checks.py does not process these RUN lines. +@IF-RISCV@; RUN: cat %s | llc @PURECAP_HARDFLOAT_ARGS@ -mattr=+a | FileCheck %s +@IFNOT-RISCV@; RUN: cat %s | llc @PURECAP_HARDFLOAT_ARGS@ --enable-tail-merge | FileCheck %s ; REQUIRES: asserts ; The branch-folder MIR pass will merge the two blocks inside these functions but ; since the base pointer is distinct it will have two MachineMemOperands. ; The cmpxchg exact logic stored the exact flag in the MachineMemOperand and ; previously assumed there would only ever be one operand, so this test ensures -; we can handle the merged logic. 
+; we can handle the merged logic by adding separate pseudo instructions (which +; ensures that the branches with different comparisons can no longer be merged). define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 %newval, i32 %cmpval) { entry: @@ -66,7 +68,6 @@ end: ret i32 0 } -; FIXME: these two branches should not be merged! define dso_local signext i32 @merge_ptr_mismatch_exact_flag(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { entry: br i1 %cond1, label %if.then, label %if.else diff --git a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-exact-branch-folder.ll b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-exact-branch-folder.ll index ec1750b4e589..3e3ee4c0a633 100644 --- a/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-exact-branch-folder.ll +++ b/llvm/test/CodeGen/CHERI-Generic/MIPS/cmpxchg-exact-branch-folder.ll @@ -4,16 +4,38 @@ ; CHERI-GENERIC-UTC: llc ; CHERI-GENERIC-UTC: mir ; RUN: llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap < %s --stop-after=branch-folder --enable-tail-merge | FileCheck %s --check-prefixes=MIR -; RUN: not --crash llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap --enable-tail-merge < %s +; RUN: cat %s | llc -mtriple=mips64 -mcpu=cheri128 -mattr=+cheri128 --relocation-model=pic -target-abi purecap --enable-tail-merge | FileCheck %s ; REQUIRES: asserts ; The branch-folder MIR pass will merge the two blocks inside these functions but ; since the base pointer is distinct it will have two MachineMemOperands. ; The cmpxchg exact logic stored the exact flag in the MachineMemOperand and ; previously assumed there would only ever be one operand, so this test ensures -; we can handle the merged logic. +; we can handle the merged logic by adding separate pseudo instructions (which +; ensures that the branches with different comparisons can no longer be merged). 
define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 %newval, i32 %cmpval) { +; CHECK-LABEL: merge_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sll $2, $6, 0 +; CHECK-NEXT: sll $1, $4, 0 +; CHECK-NEXT: andi $1, $1, 1 +; CHECK-NEXT: sll $3, $5, 0 +; CHECK-NEXT: clc $c1, $zero, 0($c3) +; CHECK-NEXT: sync +; CHECK-NEXT: .LBB0_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: cllw $1, $c1 +; CHECK-NEXT: bne $1, $2, .LBB0_3 +; CHECK-NEXT: nop +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1 +; CHECK-NEXT: cscw $4, $3, $c1 +; CHECK-NEXT: beqz $4, .LBB0_1 +; CHECK-NEXT: nop +; CHECK-NEXT: .LBB0_3: # %entry +; CHECK-NEXT: cjr $c17 +; CHECK-NEXT: daddiu $2, $zero, 0 ; MIR-LABEL: name: merge_i32 ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c3, $a0_64, $a1_64, $a2_64 @@ -24,7 +46,7 @@ define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 ; MIR-NEXT: renamable $v1 = SLL renamable $a1, 0, implicit killed $a1_64 ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) ; MIR-NEXT: SYNC 0 - ; MIR-NEXT: dead early-clobber renamable $at = CAP_ATOMIC_CMP_SWAP_I32_POSTRA killed renamable $c1, killed renamable $v0, killed renamable $v1, implicit-def dead early-clobber renamable $a0 :: (load store monotonic monotonic (s32) on %ir.ld2, addrspace 200), (load store monotonic monotonic (s32) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $at = CAP_ATOMIC_CMP_SWAP_I32_POSTRA killed renamable $c1, killed renamable $v0, killed renamable $v1, implicit-def dead early-clobber renamable $a0 ; MIR-NEXT: $v0_64 = DADDiu $zero_64, 0 ; MIR-NEXT: CapRetPseudo implicit $v0_64 entry: @@ -45,6 +67,26 @@ end: } define dso_local signext i32 @merge_ptr_addr(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_addr: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sll $1, $4, 0 +; CHECK-NEXT: andi $1, $1, 1 +; CHECK-NEXT: clc $c1, $zero, 0($c3) +; CHECK-NEXT: sync +; CHECK-NEXT: .LBB1_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: cllc $c2, $c1 +; CHECK-NEXT: ceq $1, $c2, $c5 +; CHECK-NEXT: beqz $1, .LBB1_3 +; CHECK-NEXT: nop +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB1_1 Depth=1 +; CHECK-NEXT: cscc $1, $c4, $c1 +; CHECK-NEXT: beqz $1, .LBB1_1 +; CHECK-NEXT: nop +; CHECK-NEXT: .LBB1_3: # %entry +; CHECK-NEXT: cjr $c17 +; CHECK-NEXT: daddiu $2, $zero, 0 ; MIR-LABEL: name: merge_ptr_addr ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c3, $c4, $c5, $a0_64 @@ -53,7 +95,7 @@ define dso_local signext i32 @merge_ptr_addr(i1 %cond1, ptr addrspace(200) %ptr, ; MIR-NEXT: renamable $at = ANDi killed renamable $at, 1 ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) ; MIR-NEXT: SYNC 0 - ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 :: (load store monotonic monotonic (s128) on %ir.ld2, addrspace 200), (load store monotonic monotonic (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_ADDR_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 ; MIR-NEXT: $v0_64 = DADDiu $zero_64, 0 ; MIR-NEXT: CapRetPseudo implicit killed 
$v0_64 entry: @@ -74,6 +116,26 @@ end: } define dso_local signext i32 @merge_ptr_exact(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_exact: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sll $1, $4, 0 +; CHECK-NEXT: andi $1, $1, 1 +; CHECK-NEXT: clc $c1, $zero, 0($c3) +; CHECK-NEXT: sync +; CHECK-NEXT: .LBB2_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: cllc $c2, $c1 +; CHECK-NEXT: cexeq $1, $c2, $c5 +; CHECK-NEXT: beqz $1, .LBB2_3 +; CHECK-NEXT: nop +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB2_1 Depth=1 +; CHECK-NEXT: cscc $1, $c4, $c1 +; CHECK-NEXT: beqz $1, .LBB2_1 +; CHECK-NEXT: nop +; CHECK-NEXT: .LBB2_3: # %entry +; CHECK-NEXT: cjr $c17 +; CHECK-NEXT: daddiu $2, $zero, 0 ; MIR-LABEL: name: merge_ptr_exact ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c3, $c4, $c5, $a0_64 @@ -82,7 +144,7 @@ define dso_local signext i32 @merge_ptr_exact(i1 %cond1, ptr addrspace(200) %ptr ; MIR-NEXT: renamable $at = ANDi killed renamable $at, 1 ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) ; MIR-NEXT: SYNC 0 - ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 :: (load store monotonic monotonic exact (s128) on %ir.ld2, addrspace 200), (load store monotonic monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_EXACT_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 ; MIR-NEXT: $v0_64 = DADDiu $zero_64, 0 ; MIR-NEXT: CapRetPseudo implicit killed $v0_64 entry: @@ -102,17 +164,74 @@ end: ret i32 0 } -; FIXME: these two branches should not be merged! 
define dso_local signext i32 @merge_ptr_mismatch_exact_flag(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_mismatch_exact_flag: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sll $1, $4, 0 +; CHECK-NEXT: andi $1, $1, 1 +; CHECK-NEXT: beqz $1, .LBB3_5 +; CHECK-NEXT: nop +; CHECK-NEXT: # %bb.1: # %if.then +; CHECK-NEXT: clc $c1, $zero, 0($c3) +; CHECK-NEXT: sync +; CHECK-NEXT: .LBB3_2: # %if.then +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: cllc $c2, $c1 +; CHECK-NEXT: cexeq $1, $c2, $c5 +; CHECK-NEXT: beqz $1, .LBB3_4 +; CHECK-NEXT: nop +; CHECK-NEXT: # %bb.3: # %if.then +; CHECK-NEXT: # in Loop: Header=BB3_2 Depth=1 +; CHECK-NEXT: cscc $1, $c4, $c1 +; CHECK-NEXT: beqz $1, .LBB3_2 +; CHECK-NEXT: nop +; CHECK-NEXT: .LBB3_4: # %if.then +; CHECK-NEXT: cjr $c17 +; CHECK-NEXT: daddiu $2, $zero, 0 +; CHECK-NEXT: .LBB3_5: # %if.else +; CHECK-NEXT: clc $c1, $zero, 0($c3) +; CHECK-NEXT: sync +; CHECK-NEXT: .LBB3_6: # %if.else +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: cllc $c2, $c1 +; CHECK-NEXT: ceq $1, $c2, $c5 +; CHECK-NEXT: beqz $1, .LBB3_8 +; CHECK-NEXT: nop +; CHECK-NEXT: # %bb.7: # %if.else +; CHECK-NEXT: # in Loop: Header=BB3_6 Depth=1 +; CHECK-NEXT: cscc $1, $c4, $c1 +; CHECK-NEXT: beqz $1, .LBB3_6 +; CHECK-NEXT: nop +; CHECK-NEXT: .LBB3_8: # %if.else +; CHECK-NEXT: cjr $c17 +; CHECK-NEXT: daddiu $2, $zero, 0 ; MIR-LABEL: name: merge_ptr_mismatch_exact_flag ; MIR: bb.0.entry: + ; MIR-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) ; MIR-NEXT: liveins: $c3, $c4, $c5, $a0_64 ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $at = SLL renamable $a0, 0, implicit killed $a0_64 ; MIR-NEXT: renamable $at = ANDi killed renamable $at, 1 + ; MIR-NEXT: BEQ killed renamable $at, $zero, %bb.2, implicit-def $at + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: bb.1.if.then: + ; MIR-NEXT: successors: %bb.3(0x80000000) + ; MIR-NEXT: liveins: $c3, $c4, $c5 + ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) ; MIR-NEXT: SYNC 0 - ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 :: (load store monotonic monotonic (s128) on %ir.ld2, addrspace 200), (load store monotonic monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_EXACT_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 + ; MIR-NEXT: B %bb.3, implicit-def $at + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: bb.2.if.else: + ; MIR-NEXT: successors: %bb.3(0x80000000) + ; MIR-NEXT: liveins: $c3, $c4, $c5 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $c1 = LOADCAP $zero_64, 0, killed renamable $c3 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: SYNC 0 + ; MIR-NEXT: dead early-clobber renamable $c2 = CAP_ATOMIC_CMP_SWAP_CAP_ADDR_POSTRA killed renamable $c1, killed renamable $c5, killed renamable $c4, implicit-def dead early-clobber renamable $at_64 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: bb.3.end: ; MIR-NEXT: $v0_64 = DADDiu $zero_64, 0 ; MIR-NEXT: CapRetPseudo implicit killed $v0_64 entry: diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-exact-branch-folder.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-exact-branch-folder.ll index cc3940f55040..88637e638a72 100644 --- 
a/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-exact-branch-folder.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV32/cmpxchg-exact-branch-folder.ll @@ -4,16 +4,32 @@ ; CHERI-GENERIC-UTC: llc ; CHERI-GENERIC-UTC: mir ; RUN: llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=+a < %s --stop-after=branch-folder | FileCheck %s --check-prefixes=MIR -; RUN: not --crash llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=+a < %s +; RUN: cat %s | llc -mtriple=riscv32 --relocation-model=pic -target-abi il32pc64f -mattr=+xcheri,+cap-mode,+f -mattr=+a | FileCheck %s ; REQUIRES: asserts ; The branch-folder MIR pass will merge the two blocks inside these functions but ; since the base pointer is distinct it will have two MachineMemOperands. ; The cmpxchg exact logic stored the exact flag in the MachineMemOperand and ; previously assumed there would only ever be one operand, so this test ensures -; we can handle the merged logic. +; we can handle the merged logic by adding separate pseudo instructions (which +; ensures that the branches with different comparisons can no longer be merged). define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 %newval, i32 %cmpval) { +; CHECK-LABEL: merge_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB0_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.w a1, (ca0) +; CHECK-NEXT: bne a1, a3, .LBB0_3 +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1 +; CHECK-NEXT: csc.w.rl a4, a2, (ca0) +; CHECK-NEXT: bnez a4, .LBB0_1 +; CHECK-NEXT: .LBB0_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret ; MIR-LABEL: name: merge_i32 ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c11, $x10, $x12, $x13 @@ -41,13 +57,28 @@ end: } define dso_local signext i32 @merge_ptr_addr(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_addr: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB1_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.c.rl ca1, (ca0) +; CHECK-NEXT: bne a1, a3, .LBB1_3 +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB1_1 Depth=1 +; CHECK-NEXT: csc.c a4, ca2, (ca0) +; CHECK-NEXT: bnez a4, .LBB1_1 +; CHECK-NEXT: .LBB1_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret ; MIR-LABEL: name: merge_ptr_addr ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 ; MIR-NEXT: renamable $c10 = CLC_64 killed renamable $c11, 0 :: (load (s64) from %ir.ptr, align 16, addrspace 200) - ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s64) on %ir.ld2, addrspace 200), (load store release monotonic (s64) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCapAddr killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s64) on %ir.ld2, addrspace 200), (load store release monotonic (s64) on %ir.ld1, addrspace 200) ; MIR-NEXT: $x10 = COPY $x0 ; MIR-NEXT: PseudoCRET implicit $x10 entry: @@ -68,13 +99,29 @@ end: 
} define dso_local signext i32 @merge_ptr_exact(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_exact: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB2_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.c.rl ca1, (ca0) +; CHECK-NEXT: cseqx a4, ca1, ca3 +; CHECK-NEXT: beqz a4, .LBB2_3 +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB2_1 Depth=1 +; CHECK-NEXT: csc.c a4, ca2, (ca0) +; CHECK-NEXT: bnez a4, .LBB2_1 +; CHECK-NEXT: .LBB2_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret ; MIR-LABEL: name: merge_ptr_exact ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 ; MIR-NEXT: renamable $c10 = CLC_64 killed renamable $c11, 0 :: (load (s64) from %ir.ptr, align 16, addrspace 200) - ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic exact (s64) on %ir.ld2, addrspace 200), (load store release monotonic exact (s64) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCapExact killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic exact (s64) on %ir.ld2, addrspace 200), (load store release monotonic exact (s64) on %ir.ld1, addrspace 200) ; MIR-NEXT: $x10 = COPY $x0 ; MIR-NEXT: PseudoCRET implicit $x10 entry: @@ -94,15 +141,62 @@ end: ret i32 0 } -; FIXME: these two branches should not be merged! define dso_local signext i32 @merge_ptr_mismatch_exact_flag(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_mismatch_exact_flag: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: beqz a0, .LBB3_2 +; CHECK-NEXT: # %bb.1: # %if.then +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB3_3: # %if.then +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.c.rl ca1, (ca0) +; CHECK-NEXT: cseqx a4, ca1, ca3 +; CHECK-NEXT: beqz a4, .LBB3_5 +; CHECK-NEXT: # %bb.4: # %if.then +; CHECK-NEXT: # in Loop: Header=BB3_3 Depth=1 +; CHECK-NEXT: csc.c a4, ca2, (ca0) +; CHECK-NEXT: bnez a4, .LBB3_3 +; CHECK-NEXT: .LBB3_5: # %if.then +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret +; CHECK-NEXT: .LBB3_2: # %if.else +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB3_6: # %if.else +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.c.rl ca1, (ca0) +; CHECK-NEXT: bne a1, a3, .LBB3_8 +; CHECK-NEXT: # %bb.7: # %if.else +; CHECK-NEXT: # in Loop: Header=BB3_6 Depth=1 +; CHECK-NEXT: csc.c a4, ca2, (ca0) +; CHECK-NEXT: bnez a4, .LBB3_6 +; CHECK-NEXT: .LBB3_8: # %if.else +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret ; MIR-LABEL: name: merge_ptr_mismatch_exact_flag ; MIR: bb.0.entry: + ; MIR-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: BEQ killed renamable $x10, $x0, %bb.2 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: bb.1.if.then: + ; MIR-NEXT: successors: %bb.3(0x80000000) + ; MIR-NEXT: liveins: $c11, $c12, $c13 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $c10 = CLC_64 killed renamable $c11, 0 :: (load (s64) from %ir.ptr, align 
16, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCapExact killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic exact (s64) on %ir.ld1, addrspace 200) + ; MIR-NEXT: PseudoCBR %bb.3 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: bb.2.if.else: + ; MIR-NEXT: successors: %bb.3(0x80000000) + ; MIR-NEXT: liveins: $c11, $c12, $c13 + ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $c10 = CLC_64 killed renamable $c11, 0 :: (load (s64) from %ir.ptr, align 16, addrspace 200) - ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s64) on %ir.ld2, addrspace 200), (load store release monotonic exact (s64) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCapAddr killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s64) on %ir.ld2, addrspace 200) + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: bb.3.end: ; MIR-NEXT: $x10 = COPY $x0 ; MIR-NEXT: PseudoCRET implicit $x10 entry: diff --git a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-exact-branch-folder.ll b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-exact-branch-folder.ll index d6bf459f2eb8..3d407267ac37 100644 --- a/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-exact-branch-folder.ll +++ b/llvm/test/CodeGen/CHERI-Generic/RISCV64/cmpxchg-exact-branch-folder.ll @@ -4,16 +4,33 @@ ; CHERI-GENERIC-UTC: llc ; CHERI-GENERIC-UTC: mir ; RUN: llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=+a < %s --stop-after=branch-folder | FileCheck %s --check-prefixes=MIR -; RUN: not --crash llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=+a < %s +; RUN: cat %s | llc -mtriple=riscv64 --relocation-model=pic -target-abi l64pc128d -mattr=+xcheri,+cap-mode,+f,+d -mattr=+a | FileCheck %s ; REQUIRES: asserts ; The branch-folder MIR pass will merge the two blocks inside these functions but ; since the base pointer is distinct it will have two MachineMemOperands. ; The cmpxchg exact logic stored the exact flag in the MachineMemOperand and ; previously assumed there would only ever be one operand, so this test ensures -; we can handle the merged logic. +; we can handle the merged logic by adding separate pseudo instructions (which +; ensures that the branches with different comparisons can no longer be merged). 
define dso_local signext i32 @merge_i32(i1 %cond1, ptr addrspace(200) %ptr, i32 %newval, i32 %cmpval) { +; CHECK-LABEL: merge_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: sext.w a1, a3 +; CHECK-NEXT: .LBB0_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.w a3, (ca0) +; CHECK-NEXT: bne a3, a1, .LBB0_3 +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1 +; CHECK-NEXT: csc.w.rl a4, a2, (ca0) +; CHECK-NEXT: bnez a4, .LBB0_1 +; CHECK-NEXT: .LBB0_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret ; MIR-LABEL: name: merge_i32 ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c11, $x10, $x12, $x13 @@ -42,13 +59,28 @@ end: } define dso_local signext i32 @merge_ptr_addr(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_addr: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB1_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.c.rl ca1, (ca0) +; CHECK-NEXT: bne a1, a3, .LBB1_3 +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB1_1 Depth=1 +; CHECK-NEXT: csc.c a4, ca2, (ca0) +; CHECK-NEXT: bnez a4, .LBB1_1 +; CHECK-NEXT: .LBB1_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret ; MIR-LABEL: name: merge_ptr_addr ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 ; MIR-NEXT: renamable $c10 = CLC_128 killed renamable $c11, 0 :: (load (s128) from %ir.ptr, addrspace 200) - ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s128) on %ir.ld2, addrspace 200), (load store release monotonic (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCapAddr killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s128) on %ir.ld2, addrspace 200), (load store release monotonic (s128) on %ir.ld1, addrspace 200) ; MIR-NEXT: $x10 = COPY $x0 ; MIR-NEXT: PseudoCRET implicit $x10 entry: @@ -69,13 +101,29 @@ end: } define dso_local signext i32 @merge_ptr_exact(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_exact: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB2_1: # %entry +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.c.rl ca1, (ca0) +; CHECK-NEXT: cseqx a4, ca1, ca3 +; CHECK-NEXT: beqz a4, .LBB2_3 +; CHECK-NEXT: # %bb.2: # %entry +; CHECK-NEXT: # in Loop: Header=BB2_1 Depth=1 +; CHECK-NEXT: csc.c a4, ca2, (ca0) +; CHECK-NEXT: bnez a4, .LBB2_1 +; CHECK-NEXT: .LBB2_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret ; MIR-LABEL: name: merge_ptr_exact ; MIR: bb.0.entry: ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 ; MIR-NEXT: renamable $c10 = CLC_128 killed renamable $c11, 0 :: (load (s128) from %ir.ptr, addrspace 200) - ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release 
monotonic exact (s128) on %ir.ld2, addrspace 200), (load store release monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCapExact killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic exact (s128) on %ir.ld2, addrspace 200), (load store release monotonic exact (s128) on %ir.ld1, addrspace 200) ; MIR-NEXT: $x10 = COPY $x0 ; MIR-NEXT: PseudoCRET implicit $x10 entry: @@ -95,15 +143,62 @@ end: ret i32 0 } -; FIXME: these two branches should not be merged! define dso_local signext i32 @merge_ptr_mismatch_exact_flag(i1 %cond1, ptr addrspace(200) %ptr, ptr addrspace(200) %newval, ptr addrspace(200) %cmpval) { +; CHECK-LABEL: merge_ptr_mismatch_exact_flag: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: beqz a0, .LBB3_2 +; CHECK-NEXT: # %bb.1: # %if.then +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB3_3: # %if.then +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.c.rl ca1, (ca0) +; CHECK-NEXT: cseqx a4, ca1, ca3 +; CHECK-NEXT: beqz a4, .LBB3_5 +; CHECK-NEXT: # %bb.4: # %if.then +; CHECK-NEXT: # in Loop: Header=BB3_3 Depth=1 +; CHECK-NEXT: csc.c a4, ca2, (ca0) +; CHECK-NEXT: bnez a4, .LBB3_3 +; CHECK-NEXT: .LBB3_5: # %if.then +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret +; CHECK-NEXT: .LBB3_2: # %if.else +; CHECK-NEXT: clc ca0, 0(ca1) +; CHECK-NEXT: .LBB3_6: # %if.else +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: clr.c.rl ca1, (ca0) +; CHECK-NEXT: bne a1, a3, .LBB3_8 +; CHECK-NEXT: # %bb.7: # %if.else +; CHECK-NEXT: # in Loop: Header=BB3_6 Depth=1 +; CHECK-NEXT: csc.c a4, ca2, (ca0) +; CHECK-NEXT: bnez a4, .LBB3_6 +; CHECK-NEXT: .LBB3_8: # %if.else +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: cret ; MIR-LABEL: name: merge_ptr_mismatch_exact_flag ; MIR: bb.0.entry: + ; MIR-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) ; MIR-NEXT: liveins: $c11, $c12, $c13, $x10 ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $x10 = ANDI killed renamable $x10, 1 + ; MIR-NEXT: BEQ killed renamable $x10, $x0, %bb.2 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: bb.1.if.then: + ; MIR-NEXT: successors: %bb.3(0x80000000) + ; MIR-NEXT: liveins: $c11, $c12, $c13 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: renamable $c10 = CLC_128 killed renamable $c11, 0 :: (load (s128) from %ir.ptr, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCapExact killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: PseudoCBR %bb.3 + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: bb.2.if.else: + ; MIR-NEXT: successors: %bb.3(0x80000000) + ; MIR-NEXT: liveins: $c11, $c12, $c13 + ; MIR-NEXT: {{ $}} ; MIR-NEXT: renamable $c10 = CLC_128 killed renamable $c11, 0 :: (load (s128) from %ir.ptr, addrspace 200) - ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCap killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s128) on %ir.ld2, addrspace 200), (load store release monotonic exact (s128) on %ir.ld1, addrspace 200) + ; MIR-NEXT: dead early-clobber renamable $c11, dead early-clobber renamable $x14 = PseudoCheriCmpXchgCapAddr killed renamable $c10, killed renamable $c13, killed renamable $c12, 5 :: (load store release monotonic (s128) on %ir.ld2, addrspace 200) + ; MIR-NEXT: {{ $}} + ; MIR-NEXT: 
bb.3.end: ; MIR-NEXT: $x10 = COPY $x0 ; MIR-NEXT: PseudoCRET implicit $x10 entry: