[CHERI-RISC-V] Report true for __atomic_always_lock_free(sizeof(__intcap)) #721

Open · wants to merge 18 commits into base: dev

Commits (18):
0fe7c3a · Add an "exact" flag to AtomicCmpXchgInst · arichardson, Sep 20, 2023
f680e93 · Add an ExactCompare flag to MachineMemOperand · arichardson, Sep 21, 2023
3bfa7ac · [CHERI-Generic] Update a test to use opaque pointers · arichardson, Sep 21, 2023
b47f510 · [CHERI-Generic] Add a baseline test for cmpxchg exact · arichardson, Sep 21, 2023
bf0993f · [CHERI] Correctly lower cmpxchg with the exact flag · arichardson, Sep 21, 2023
67d9eb4 · [CHERI] Add a baseline test for atomics on capability-size integers · arichardson, Sep 19, 2023
3af2a62 · [CHERI-RISC-V] Support inline atomic loads for 2*XLen integers · arichardson, Sep 20, 2023
75ce173 · [CHERI-RISC-V] Support inline atomic stores for 2*XLen integers · arichardson, Sep 20, 2023
ae50bd6 · [CHERI-RISC-V] Support inline cmpxchg for 2*XLen integers · arichardson, Sep 21, 2023
f736e09 · [CHERI-RISC-V] Support inline atomic RMW for 2*XLen integers · arichardson, Sep 21, 2023
7ecb141 · [CHERI-RISC-V] Use camoswap.c for 2*XLen integer Xchg operations · arichardson, Sep 21, 2023
25303f9 · [CHERI-RISC-V] Support atomic load/store with capability pointers · arichardson, Sep 21, 2023
f4bd440 · [CHERI-RISC-V] Support hybrid mode atomic load/store of 2*XLen integers · arichardson, Sep 21, 2023
87e5054 · [CHERI] Add tests for __atomic_always_lock_free(sizeof(uintptr_t)) · arichardson, Sep 21, 2023
2c7c2b7 · [CHERI-RISC-V] Report true for __atomic_always_lock_free(sizeof(__int… · arichardson, Sep 22, 2023
1dcc97c · Add a regression test for cmpxchg exact crash · arichardson, Feb 16, 2024
b30abe0 · [CHERI-Generic] Make it easier to add new substitutions · arichardson, Feb 17, 2024
0eb4a79 · [CHERI] Use separate Pseudo instructions for cmpxchg nodes · arichardson, Feb 18, 2024
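For orientation, a rough sketch of the user-visible effect this series targets, assembled from the new Sema and CodeGen tests further down. It is not part of the diff itself, and the assertions are only expected to hold for purecap compiles with the A extension enabled:

/// Illustrative sketch only (mirrors clang/test/Sema/cheri/atomic-lock-free.c
/// and clang/test/CodeGen/cheri/atomic-lock-free.c below).
_Static_assert(__atomic_always_lock_free(sizeof(void *), 0), "");
#ifdef __CHERI_PURE_CAPABILITY__
/// Previously reported as false for purecap; true with this change.
_Static_assert(__atomic_always_lock_free(sizeof(__intcap), 0), "");
#endif

__intcap load_cap(__intcap *i) {
  // Expected to lower to an inline capability-width atomic load for purecap
  // rather than an __atomic_load libcall.
  return __atomic_load_n(i, __ATOMIC_SEQ_CST);
}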
18 changes: 16 additions & 2 deletions clang/lib/Basic/Targets/RISCV.h
@@ -190,8 +190,15 @@ class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
   void setMaxAtomicWidth() override {
     MaxAtomicPromoteWidth = 128;
 
-    if (ISAInfo->hasExtension("a"))
+    if (ISAInfo->hasExtension("a")) {
       MaxAtomicInlineWidth = 32;
+      // With CHERI we support capability-size integer atomic operations without
+      // a libcall. Currently this is limited to purecap since in hybrid mode
+      // RMW/CMPXCHG with a capability pointer does not work yet.
+      // See https://github.com/CTSRD-CHERI/llvm-project/pull/490
+      if (CapabilityABI)
+        MaxAtomicInlineWidth = 64;
+    }
   }
 
   uint64_t getPointerRangeForCHERICapability() const override { return 32; }
@@ -226,8 +233,15 @@ class LLVM_LIBRARY_VISIBILITY RISCV64TargetInfo : public RISCVTargetInfo {
   void setMaxAtomicWidth() override {
     MaxAtomicPromoteWidth = 128;
 
-    if (ISAInfo->hasExtension("a"))
+    if (ISAInfo->hasExtension("a")) {
       MaxAtomicInlineWidth = 64;
+      // With CHERI we support capability-size integer atomic operations without
+      // a libcall. Currently this is limited to purecap since in hybrid mode
+      // RMW/CMPXCHG with a capability pointer does not work yet.
+      // See https://github.com/CTSRD-CHERI/llvm-project/pull/490
+      if (CapabilityABI)
+        MaxAtomicInlineWidth = 128;
+    }
   }
 
   uint64_t getPointerRangeForCHERICapability() const override { return 64; }
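Raising MaxAtomicInlineWidth to the capability width is what lets Clang emit inline IR for capability-size (2*XLen) integer atomics instead of libatomic calls. A minimal sketch of the kind of RMW this affects, reusing the cap_size_int typedef from the CodeGen test below (the function name is illustrative and the described lowering is the expectation stated in the commit messages, not verified output):

#if __CHERI_CAPABILITY_WIDTH__ == 64
typedef __INT64_TYPE__ cap_size_int;
#else
typedef __int128 cap_size_int;
#endif

cap_size_int fetch_add_cap_size(cap_size_int *p, cap_size_int v) {
  // purecap: expected to become an inline atomicrmw add;
  // hybrid: still a libatomic call, plus the "large atomic operation may
  // incur significant performance penalty" warning seen in the tests below.
  return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
}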
275 changes: 275 additions & 0 deletions clang/test/CodeGen/cheri/atomic-lock-free.c
@@ -0,0 +1,275 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
/// Check that we emit inline atomics rather than library calls for capability-size atomics
// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify=purecap | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP64
// RUN: %riscv64_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify=hybrid | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID64
// RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify=purecap | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP32
// RUN: %riscv32_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify=hybrid | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID32
// purecap-no-diagnostics

#if __CHERI_CAPABILITY_WIDTH__ == 64
typedef __INT64_TYPE__ cap_size_int;
#else
typedef __int128 cap_size_int;
#endif

// PURECAP64-LABEL: define {{[^@]+}}@load_long
// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = load atomic i64, ptr addrspace(200) [[L]] seq_cst, align 8
// PURECAP64-NEXT: ret i64 [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@load_long
// HYBRID64-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0:[0-9]+]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[TMP0:%.*]] = load atomic i64, ptr [[L]] seq_cst, align 8
// HYBRID64-NEXT: ret i64 [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@load_long
// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = load atomic i32, ptr addrspace(200) [[L]] seq_cst, align 4
// PURECAP32-NEXT: ret i32 [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@load_long
// HYBRID32-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0:[0-9]+]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[TMP0:%.*]] = load atomic i32, ptr [[L]] seq_cst, align 4
// HYBRID32-NEXT: ret i32 [[TMP0]]
//
long load_long(long* l) {
return __atomic_load_n(l, __ATOMIC_SEQ_CST);
}

// PURECAP64-LABEL: define {{[^@]+}}@load_cap
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[I]] seq_cst, align 16
// PURECAP64-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@load_cap
// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr [[I]] seq_cst, align 16
// HYBRID64-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@load_cap
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[I]] seq_cst, align 8
// PURECAP32-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@load_cap
// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr [[I]] seq_cst, align 8
// HYBRID32-NEXT: ret ptr addrspace(200) [[TMP0]]
//
__intcap load_cap(__intcap* i) {
return __atomic_load_n(i, __ATOMIC_SEQ_CST);
}

// PURECAP64-LABEL: define {{[^@]+}}@loadi128
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = load atomic i128, ptr addrspace(200) [[I]] seq_cst, align 16
// PURECAP64-NEXT: ret i128 [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@loadi128
// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
// HYBRID64-NEXT: call void @__atomic_load(i64 noundef 16, ptr noundef [[I]], ptr noundef [[ATOMIC_TEMP]], i32 noundef signext 5)
// HYBRID64-NEXT: [[TMP0:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 16
// HYBRID64-NEXT: ret i128 [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@loadi128
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = load atomic i64, ptr addrspace(200) [[I]] seq_cst, align 8
// PURECAP32-NEXT: ret i64 [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@loadi128
// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[CALL:%.*]] = call i64 @__atomic_load_8(ptr noundef [[I]], i32 noundef 5)
// HYBRID32-NEXT: ret i64 [[CALL]]
//
cap_size_int loadi128(cap_size_int* i) {
return __atomic_load_n(i, __ATOMIC_SEQ_CST);
// hybrid-warning@-1{{large atomic operation may incur significant performance penalty}}
}

// PURECAP64-LABEL: define {{[^@]+}}@xchg_long
// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]], i64 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[L]], i64 [[VAL]] seq_cst, align 8
// PURECAP64-NEXT: ret i64 [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@xchg_long
// HYBRID64-SAME: (ptr noundef [[L:%.*]], i64 noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[L]], i64 [[VAL]] seq_cst, align 8
// HYBRID64-NEXT: ret i64 [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@xchg_long
// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]], i32 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[L]], i32 [[VAL]] seq_cst, align 4
// PURECAP32-NEXT: ret i32 [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@xchg_long
// HYBRID32-SAME: (ptr noundef [[L:%.*]], i32 noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[L]], i32 [[VAL]] seq_cst, align 4
// HYBRID32-NEXT: ret i32 [[TMP0]]
//
long xchg_long(long* l, long val) {
return __atomic_exchange_n(l, val, __ATOMIC_SEQ_CST);
}

// PURECAP64-LABEL: define {{[^@]+}}@xchg_cap
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 16
// PURECAP64-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@xchg_cap
// HYBRID64-SAME: (ptr noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 16
// HYBRID64-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@xchg_cap
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 8
// PURECAP32-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@xchg_cap
// HYBRID32-SAME: (ptr noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 8
// HYBRID32-NEXT: ret ptr addrspace(200) [[TMP0]]
//
__intcap xchg_cap(__intcap* i, __intcap val) {
return __atomic_exchange_n(i, val, __ATOMIC_SEQ_CST);
}

// PURECAP64-LABEL: define {{[^@]+}}@xchg_i128
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]], i128 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], i128 [[VAL]] seq_cst, align 16
// PURECAP64-NEXT: ret i128 [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@xchg_i128
// HYBRID64-SAME: (ptr noundef [[I:%.*]], i128 noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 16
// HYBRID64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
// HYBRID64-NEXT: store i128 [[VAL]], ptr [[DOTATOMICTMP]], align 16
// HYBRID64-NEXT: call void @__atomic_exchange(i64 noundef 16, ptr noundef [[I]], ptr noundef [[DOTATOMICTMP]], ptr noundef [[ATOMIC_TEMP]], i32 noundef signext 5)
// HYBRID64-NEXT: [[TMP0:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 16
// HYBRID64-NEXT: ret i128 [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@xchg_i128
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]], i64 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], i64 [[VAL]] seq_cst, align 8
// PURECAP32-NEXT: ret i64 [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@xchg_i128
// HYBRID32-SAME: (ptr noundef [[I:%.*]], i64 noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(ptr noundef [[I]], i64 noundef [[VAL]], i32 noundef 5)
// HYBRID32-NEXT: ret i64 [[CALL]]
//
cap_size_int xchg_i128(cap_size_int* i, cap_size_int val) {
return __atomic_exchange_n(i, val, __ATOMIC_SEQ_CST);
// hybrid-warning@-1{{large atomic operation may incur significant performance penalty}}
}

// PURECAP64-LABEL: define {{[^@]+}}@lock_free_long
// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: ret i1 true
//
// HYBRID64-LABEL: define {{[^@]+}}@lock_free_long
// HYBRID64-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: ret i1 true
//
// PURECAP32-LABEL: define {{[^@]+}}@lock_free_long
// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: ret i1 true
//
// HYBRID32-LABEL: define {{[^@]+}}@lock_free_long
// HYBRID32-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: ret i1 true
//
_Bool lock_free_long(long* l) {
_Static_assert(__atomic_always_lock_free(sizeof(*l), 0), "");
return __atomic_is_lock_free(sizeof(*l), l);
}

//
// FIXME: should return true here
// PURECAP64-LABEL: define {{[^@]+}}@lock_free_cap
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: ret i1 true
//
// HYBRID64-LABEL: define {{[^@]+}}@lock_free_cap
// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef [[I]])
// HYBRID64-NEXT: ret i1 [[CALL]]
//
// PURECAP32-LABEL: define {{[^@]+}}@lock_free_cap
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: ret i1 true
//
// HYBRID32-LABEL: define {{[^@]+}}@lock_free_cap
// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr noundef [[I]])
// HYBRID32-NEXT: ret i1 [[CALL]]
//
_Bool lock_free_cap(__intcap* i) {
#ifdef __CHERI_PURE_CAPABILITY__
_Static_assert(__atomic_always_lock_free(sizeof(*i), 0), "");
#endif
return __atomic_is_lock_free(sizeof(*i), i);
}

//
// PURECAP64-LABEL: define {{[^@]+}}@lock_free_i128
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: ret i1 true
//
// HYBRID64-LABEL: define {{[^@]+}}@lock_free_i128
// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef [[I]])
// HYBRID64-NEXT: ret i1 [[CALL]]
//
// PURECAP32-LABEL: define {{[^@]+}}@lock_free_i128
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: ret i1 true
//
// HYBRID32-LABEL: define {{[^@]+}}@lock_free_i128
// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr noundef [[I]])
// HYBRID32-NEXT: ret i1 [[CALL]]
//
_Bool lock_free_i128(cap_size_int* i) {
#ifdef __CHERI_PURE_CAPABILITY__
_Static_assert(__atomic_always_lock_free(sizeof(*i), 0), "");
#endif
return __atomic_is_lock_free(sizeof(*i), i);
}
12 changes: 8 additions & 4 deletions clang/test/Preprocessor/cheri-lock-free.c
@@ -1,9 +1,9 @@
 /// Check that we report pointers as being always lock-free, otherwise <atomic>
 /// ends up using locks with -ffreestanding.
 // RUN: %riscv32_cheri_cc1 -fgnuc-version=4.2.1 -target-feature +a -E -dM %s \
-// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-32 --implicit-check-not=_LOCK_FREE
+// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-32-HYBRID --implicit-check-not=_LOCK_FREE
 // RUN: %riscv32_cheri_purecap_cc1 -fgnuc-version=4.2.1 -target-feature +a -E -dM %s \
-// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-32 --implicit-check-not=_LOCK_FREE
+// RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-32-PURECAP --implicit-check-not=_LOCK_FREE
 // RUN: %riscv64_cheri_cc1 -fgnuc-version=4.2.1 -target-feature +a -E -dM %s \
 // RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-64 --implicit-check-not=_LOCK_FREE
 // RUN: %riscv64_cheri_purecap_cc1 -fgnuc-version=4.2.1 -target-feature +a -E -dM %s \
@@ -15,7 +15,9 @@
 // CHECK: #define __CLANG_ATOMIC_CHAR_LOCK_FREE 2
 // CHECK: #define __CLANG_ATOMIC_INT_LOCK_FREE 2
 // CHECK-64: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 2
-// CHECK-32: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 1
+// NB: LLONG is always lockfree for RV32 purecap since we use capability atomics.
+// CHECK-32-HYBRID: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 1
+// CHECK-32-PURECAP: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 2
 // CHECK: #define __CLANG_ATOMIC_LONG_LOCK_FREE 2
 // CHECK: #define __CLANG_ATOMIC_POINTER_LOCK_FREE 2
 // CHECK: #define __CLANG_ATOMIC_SHORT_LOCK_FREE 2
@@ -26,7 +28,9 @@
 // CHECK: #define __GCC_ATOMIC_CHAR_LOCK_FREE 2
 // CHECK: #define __GCC_ATOMIC_INT_LOCK_FREE 2
 // CHECK-64: #define __GCC_ATOMIC_LLONG_LOCK_FREE 2
-// CHECK-32: #define __GCC_ATOMIC_LLONG_LOCK_FREE 1
+// NB: LLONG is always lockfree for RV32 purecap since we use capability atomics.
+// CHECK-32-HYBRID: #define __GCC_ATOMIC_LLONG_LOCK_FREE 1
+// CHECK-32-PURECAP: #define __GCC_ATOMIC_LLONG_LOCK_FREE 2
 // CHECK: #define __GCC_ATOMIC_LONG_LOCK_FREE 2
 // CHECK: #define __GCC_ATOMIC_POINTER_LOCK_FREE 2
 // CHECK: #define __GCC_ATOMIC_SHORT_LOCK_FREE 2
22 changes: 22 additions & 0 deletions clang/test/Sema/cheri/atomic-lock-free.c
@@ -0,0 +1,22 @@
/// Check that we report true for __atomic_always_lock_free(sizeof(uintptr_t)).
/// For example libc++'s std::atomic includes a is_always_lock_free member defined as
/// static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_), 0);
/// This was incorrectly being set to false for purecap std::atomic<uintptr_t>.
/// Ideally the builtin would take a type rather than a size but unfortunately it's too late to change that.
/// See also CodeGen/cheri/atomic-lock-free.c to show that we generate the appropriate code.
// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a %s -fsyntax-only -verify=purecap
// RUN: %riscv64_cheri_cc1 -target-feature +a %s -fsyntax-only -verify=hybrid
// RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -fsyntax-only -verify=purecap
// RUN: %riscv32_cheri_cc1 -target-feature +a %s -fsyntax-only -verify=hybrid
// purecap-no-diagnostics

_Static_assert(__atomic_always_lock_free(sizeof(char), 0), "");
_Static_assert(__atomic_always_lock_free(sizeof(short), 0), "");
_Static_assert(__atomic_always_lock_free(sizeof(int), 0), "");
_Static_assert(__atomic_always_lock_free(sizeof(__INTPTR_TYPE__), 0), "");
_Static_assert(__atomic_always_lock_free(sizeof(__UINTPTR_TYPE__), 0), "");
_Static_assert(__atomic_always_lock_free(sizeof(void *), 0), "");
/// TODO: it would be nice if hybrid mode also allowed lock-free sizeof(void * __capability)
/// but this is not currently true since atomic RMW/CMPXCHG with capability
/// pointers are not supported.
_Static_assert(__atomic_always_lock_free(sizeof(void * __capability), 0), ""); // hybrid-error{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void * __capability), 0)'}}
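A standalone illustration of the libc++ scenario described in the test header above (not part of the patch; assumes C++17 and a libc++-style standard library). Since libc++ defines is_always_lock_free via __atomic_always_lock_free(sizeof(__a_), 0), the purecap change should make this assertion hold:

#include <atomic>
#include <cstdint>

// Mirrors the builtin check exercised by the Sema test above.
static_assert(std::atomic<std::uintptr_t>::is_always_lock_free,
              "purecap std::atomic<uintptr_t> should not fall back to locks");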
2 changes: 2 additions & 0 deletions llvm/include/llvm/CodeGen/MachineFunction.h
@@ -960,13 +960,15 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
       Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
       const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
       AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+      bool ExactCompare = false,
       AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
 
   MachineMemOperand *getMachineMemOperand(
       MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
       Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
       const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
       AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+      bool ExactCompare = false,
       AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
 
   /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
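The extra ExactCompare parameter carries the exact-comparison property of a cmpxchg into the MachineMemOperand so the backend can take it into account when lowering, per the "Add an 'exact' flag to AtomicCmpXchgInst" and "[CHERI] Correctly lower cmpxchg with the exact flag" commits. At the source level the affected operation is just an ordinary capability compare-exchange; a hedged sketch (function name illustrative, and the exact flag itself is not observable from C):

_Bool cas_cap(__intcap *p, __intcap expected, __intcap desired) {
  // purecap: expected to lower to an inline capability cmpxchg sequence;
  // whether the comparison is treated as "exact" is decided on the LLVM side.
  return __atomic_compare_exchange_n(p, &expected, desired, /*weak=*/0,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}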