diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 652426db6202..e3d08020df98 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -5515,6 +5515,18 @@ def AtomicXchg : CIR_Op<"atomic.xchg", [AllTypesMatch<["result", "val"]>]> {
   let hasVerifier = 0;
 }
 
+def MemScope_SingleThread : I32EnumAttrCase<"MemScope_SingleThread",
+                                            0, "single_thread">;
+def MemScope_System : I32EnumAttrCase<"MemScope_System",
+                                      1, "system">;
+
+def MemScopeKind : I32EnumAttr<
+    "MemScopeKind",
+    "Memory Scope Enumeration",
+    [MemScope_SingleThread, MemScope_System]> {
+  let cppNamespace = "::cir";
+}
+
 def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg",
                            [AllTypesMatch<["old", "expected", "desired"]>]> {
   let summary = "Atomic compare exchange";
@@ -5537,6 +5549,7 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg",
                        CIR_AnyType:$desired,
                        Arg:$succ_order,
                        Arg:$fail_order,
+                       OptionalAttr<MemScopeKind>:$syncscope,
                        OptionalAttr:$alignment,
                        UnitAttr:$weak,
                        UnitAttr:$is_volatile);
@@ -5549,6 +5562,7 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg",
    `success` `=` $succ_order `,`
    `failure` `=` $fail_order
    `)`
+   (`syncscope` `(` $syncscope^ `)`)?
    (`align` `(` $alignment^ `)`)?
    (`weak` $weak^)?
    (`volatile` $is_volatile^)?
@@ -5558,18 +5572,6 @@ def AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg",
   let hasVerifier = 0;
 }
 
-def MemScope_SingleThread : I32EnumAttrCase<"MemScope_SingleThread",
-                                            0, "single_thread">;
-def MemScope_System : I32EnumAttrCase<"MemScope_System",
-                                      1, "system">;
-
-def MemScopeKind : I32EnumAttr<
-    "MemScopeKind",
-    "Memory Scope Enumeration",
-    [MemScope_SingleThread, MemScope_System]> {
-  let cppNamespace = "::cir";
-}
-
 def AtomicFence : CIR_Op<"atomic.fence"> {
   let summary = "Atomic thread fence";
   let description = [{
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index fc9c7c19afb4..2c29c10705e2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -415,7 +415,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                               Address Val2, uint64_t Size,
                               cir::MemOrder SuccessOrder,
                               cir::MemOrder FailureOrder,
-                              llvm::SyncScope::ID Scope) {
+                              cir::MemScopeKind Scope) {
   auto &builder = CGF.getBuilder();
   auto loc = CGF.getLoc(E->getSourceRange());
   auto Expected = builder.createLoad(loc, Val1);
@@ -425,6 +425,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
       loc, Expected.getType(), boolTy, Ptr.getPointer(), Expected, Desired,
       cir::MemOrderAttr::get(&CGF.getMLIRContext(), SuccessOrder),
       cir::MemOrderAttr::get(&CGF.getMLIRContext(), FailureOrder),
+      cir::MemScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
       builder.getI64IntegerAttr(Ptr.getAlignment().getAsAlign().value()));
   cmpxchg.setIsVolatile(E->isVolatile());
   cmpxchg.setWeak(IsWeak);
@@ -452,7 +453,7 @@
 static void emitAtomicCmpXchgFailureSet(
     CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
     Address Val1, Address Val2, mlir::Value FailureOrderVal, uint64_t Size,
-    cir::MemOrder SuccessOrder, llvm::SyncScope::ID Scope) {
+    cir::MemOrder SuccessOrder, cir::MemScopeKind Scope) {
 
   cir::MemOrder FailureOrder;
   if (auto ordAttr = getConstOpIntAttr(FailureOrderVal)) {
@@ -541,7 +542,8 @@
 static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
                          mlir::Value IsWeak, mlir::Value FailureOrder,
-                         uint64_t Size, cir::MemOrder Order, uint8_t Scope) {
+                         uint64_t Size, cir::MemOrder Order,
+                         cir::MemScopeKind Scope) {
   assert(!cir::MissingFeatures::syncScopeID());
 
   StringRef Op;
@@ -797,7 +799,7 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest,
   if (!ScopeModel) {
     assert(!cir::MissingFeatures::syncScopeID());
     emitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
-                 Order, /*FIXME(cir): LLVM default scope*/ 1);
+                 Order, cir::MemScopeKind::MemScope_System);
     return;
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 98eeb73dd056..52fe2840e9fb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -318,7 +318,7 @@ static RValue emitBinaryAtomicPost(CIRGenFunction &cgf,
   return RValue::get(result);
 }
 
-static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf,
+static mlir::Value makeAtomicCmpXchgValue(CIRGenFunction &cgf,
                                           const CallExpr *expr,
                                           bool returnBool) {
   QualType typ = returnBool ? expr->getArg(1)->getType() : expr->getType();
@@ -341,6 +341,8 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf,
                         cir::MemOrder::SequentiallyConsistent),
       MemOrderAttr::get(&cgf.getMLIRContext(),
                         cir::MemOrder::SequentiallyConsistent),
+      MemScopeKindAttr::get(&cgf.getMLIRContext(),
+                            cir::MemScopeKind::MemScope_System),
       builder.getI64IntegerAttr(destAddr.getAlignment().getAsAlign().value()));
 
   return returnBool ? op.getResult(1) : op.getResult(0);
@@ -1854,14 +1856,14 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__sync_val_compare_and_swap_4:
   case Builtin::BI__sync_val_compare_and_swap_8:
   case Builtin::BI__sync_val_compare_and_swap_16:
-    return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
+    return RValue::get(makeAtomicCmpXchgValue(*this, E, false));
 
   case Builtin::BI__sync_bool_compare_and_swap_1:
   case Builtin::BI__sync_bool_compare_and_swap_2:
   case Builtin::BI__sync_bool_compare_and_swap_4:
   case Builtin::BI__sync_bool_compare_and_swap_8:
   case Builtin::BI__sync_bool_compare_and_swap_16:
-    return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
+    return RValue::get(makeAtomicCmpXchgValue(*this, E, true));
 
   case Builtin::BI__sync_swap_1:
   case Builtin::BI__sync_swap_2:
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 131b14cd9aef..12f783fe778a 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -3210,11 +3210,12 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite(
 
   auto expected = adaptor.getExpected();
   auto desired = adaptor.getDesired();
-  // FIXME: add syncscope.
   auto cmpxchg = rewriter.create<mlir::LLVM::AtomicCmpXchgOp>(
       op.getLoc(), adaptor.getPtr(), expected, desired,
       getLLVMAtomicOrder(adaptor.getSuccOrder()),
       getLLVMAtomicOrder(adaptor.getFailOrder()));
+  if (const auto ss = adaptor.getSyncscope(); ss.has_value())
+    cmpxchg.setSyncscope(getLLVMSyncScope(ss.value()));
   cmpxchg.setAlignment(adaptor.getAlignment());
   cmpxchg.setWeak(adaptor.getWeak());
   cmpxchg.setVolatile_(adaptor.getIsVolatile());
diff --git a/clang/test/CIR/CodeGen/atomic-runtime.cpp b/clang/test/CIR/CodeGen/atomic-runtime.cpp
index cea46849c83e..231261d58224 100644
--- a/clang/test/CIR/CodeGen/atomic-runtime.cpp
+++ b/clang/test/CIR/CodeGen/atomic-runtime.cpp
@@ -120,7 +120,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(default, []) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = relaxed) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = relaxed) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -131,7 +131,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = acquire) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = acquire) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -142,7 +142,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = seq_cst) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = relaxed, failure = seq_cst) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -158,7 +158,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(default, []) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = relaxed) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = relaxed) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -169,7 +169,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = acquire) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = acquire) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -180,7 +180,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = seq_cst) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acquire, failure = seq_cst) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -196,7 +196,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(default, []) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = relaxed) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = relaxed) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -207,7 +207,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = acquire) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = acquire) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -218,7 +218,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = seq_cst) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = release, failure = seq_cst) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -234,7 +234,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(default, []) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = relaxed) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = relaxed) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -245,7 +245,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = acquire) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = acquire) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -256,7 +256,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = seq_cst) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = acq_rel, failure = seq_cst) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -272,7 +272,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(default, []) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = relaxed) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = relaxed) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -283,7 +283,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(anyof, [#cir.int<1> : !s32i, #cir.int<2> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = acquire) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = acquire) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
@@ -294,7 +294,7 @@ bool atomic_compare_exchange_n(int* ptr, int* expected,
 // CHECK: cir.case(equal, [#cir.int<5> : !s32i]) {
 // CHECK: %[[expected:.*]] = cir.load %[[expected_addr]] : !cir.ptr, !s32i
 // CHECK: %[[desired:.*]] = cir.load %[[desired_var]] : !cir.ptr, !s32i
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = seq_cst) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg(%[[ptr]] : !cir.ptr, %[[expected]] : !s32i, %[[desired]] : !s32i, success = seq_cst, failure = seq_cst) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[succeeded:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[succeeded]] {
 // CHECK: cir.store %old, %[[expected_addr]] : !s32i, !cir.ptr
diff --git a/clang/test/CIR/CodeGen/atomic-xchg-field.c b/clang/test/CIR/CodeGen/atomic-xchg-field.c
index d2d0a6be7410..4ca4daa4baf6 100644
--- a/clang/test/CIR/CodeGen/atomic-xchg-field.c
+++ b/clang/test/CIR/CodeGen/atomic-xchg-field.c
@@ -48,7 +48,7 @@ void structAtomicExchange(unsigned referenceCount, wPtr item) {
 }
 
 // CHECK-LABEL: @structAtomicExchange
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u32i, {{.*}} : !u32i, success = seq_cst, failure = seq_cst) align(8) weak : (!u32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u32i, {{.*}} : !u32i, success = seq_cst, failure = seq_cst) syncscope(system) align(8) weak : (!u32i, !cir.bool)
 
 // LLVM-LABEL: @structAtomicExchange
 // LLVM: load i32
diff --git a/clang/test/CIR/CodeGen/atomic.cpp b/clang/test/CIR/CodeGen/atomic.cpp
index d04c767591a4..dcdae8065de3 100644
--- a/clang/test/CIR/CodeGen/atomic.cpp
+++ b/clang/test/CIR/CodeGen/atomic.cpp
@@ -261,7 +261,7 @@ bool fd4(struct S *a, struct S *b, struct S *c) {
 }
 
 // CHECK-LABEL: @_Z3fd4P1SS0_S0_
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) align(8) weak : (!u64i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) syncscope(system) align(8) weak : (!u64i, !cir.bool)
 
 // LLVM-LABEL: @_Z3fd4P1SS0_S0_
 // LLVM: cmpxchg weak ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8
@@ -273,7 +273,7 @@ bool fi4a(int *i) {
 }
 
 // CHECK-LABEL: @_Z4fi4aPi
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) syncscope(system) align(4) : (!s32i, !cir.bool)
 
 // LLVM-LABEL: @_Z4fi4aPi
 // LLVM: %[[RES:.*]] = cmpxchg ptr %7, i32 %8, i32 %9 acquire acquire, align 4
@@ -286,7 +286,7 @@ bool fi4b(int *i) {
 }
 
 // CHECK-LABEL: @_Z4fi4bPi
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) align(4) weak : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = acquire, failure = acquire) syncscope(system) align(4) weak : (!s32i, !cir.bool)
 
 // LLVM-LABEL: @_Z4fi4bPi
 // LLVM: %[[R:.*]] = cmpxchg weak ptr {{.*}}, i32 {{.*}}, i32 {{.*}} acquire acquire, align 4
@@ -299,7 +299,7 @@ bool fi4c(atomic_int *i) {
 }
 
 // CHECK-LABEL: @_Z4fi4cPU7_Atomici
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) align(4) : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: %[[CMP:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[CMP:.*]] {
 // CHECK: cir.store %old, {{.*}} : !s32i, !cir.ptr
@@ -314,7 +314,7 @@ bool fi4d(atomic_int *i) {
 }
 
 // CHECK-LABEL: @_Z4fi4dPU7_Atomici
-// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) align(4) weak : (!s32i, !cir.bool)
+// CHECK: %old, %cmp = cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s32i, {{.*}} : !s32i, success = seq_cst, failure = seq_cst) syncscope(system) align(4) weak : (!s32i, !cir.bool)
 // CHECK: %[[CMP:.*]] = cir.unary(not, %cmp) : !cir.bool, !cir.bool
 // CHECK: cir.if %[[CMP:.*]] {
 // CHECK: cir.store %old, {{.*}} : !s32i, !cir.ptr
@@ -451,7 +451,7 @@ void sub_byte(char* a, char b) {
 // CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr
 // CHECK: %[[CMP:.*]] = cir.load {{.*}} : !cir.ptr, !s32i
 // CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i
-// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) align(4) : (!s32i, !cir.bool)
+// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: cir.store %[[RES]], {{.*}} : !cir.bool, !cir.ptr
 
 // LLVM-LABEL: @_Z12cmp_bool_int
@@ -468,7 +468,7 @@ void cmp_bool_int(int* p, int x, int u) {
 
 // CHECK-LABEL: @_Z13cmp_bool_long
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) align(8) : (!s64i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) syncscope(system) align(8) : (!s64i, !cir.bool)
 
 // LLVM-LABEL: @_Z13cmp_bool_long
 // LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8
@@ -477,7 +477,7 @@ void cmp_bool_long(long* p, long x, long u) {
 }
 
 // CHECK-LABEL: @_Z14cmp_bool_short
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) align(2) : (!s16i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) syncscope(system) align(2) : (!s16i, !cir.bool)
 
 // LLVM-LABEL: @_Z14cmp_bool_short
 // LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst, align 2
@@ -486,7 +486,7 @@ void cmp_bool_short(short* p, short x, short u) {
 }
 
 // CHECK-LABEL: @_Z13cmp_bool_byte
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) align(1) : (!s8i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) syncscope(system) align(1) : (!s8i, !cir.bool)
 
 // LLVM-LABEL: @_Z13cmp_bool_byte
 // LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst, align 1
@@ -498,7 +498,7 @@ void cmp_bool_byte(char* p, char x, char u) {
 // CHECK: %[[PTR:.*]] = cir.load {{.*}} : !cir.ptr>, !cir.ptr
 // CHECK: %[[CMP:.*]] = cir.load {{.*}} : !cir.ptr, !s32i
 // CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i
-// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) align(4) : (!s32i, !cir.bool)
+// CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP]] : !s32i, %[[UPD]] : !s32i, success = seq_cst, failure = seq_cst) syncscope(system) align(4) : (!s32i, !cir.bool)
 // CHECK: cir.store %[[OLD]], {{.*}} : !s32i, !cir.ptr
 
 // LLVM-LABEL: @_Z11cmp_val_int
@@ -513,7 +513,7 @@ void cmp_val_int(int* p, int x, int u) {
 }
 
 // CHECK-LABEL: @_Z12cmp_val_long
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) align(8) : (!s64i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s64i, {{.*}} : !s64i, success = seq_cst, failure = seq_cst) syncscope(system) align(8) : (!s64i, !cir.bool)
 
 // LLVM-LABEL: @_Z12cmp_val_long
 // LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8
@@ -522,7 +522,7 @@ void cmp_val_long(long* p, long x, long u) {
 }
 
 // CHECK-LABEL: @_Z13cmp_val_short
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) align(2) : (!s16i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s16i, {{.*}} : !s16i, success = seq_cst, failure = seq_cst) syncscope(system) align(2) : (!s16i, !cir.bool)
 
 // LLVM-LABEL: @_Z13cmp_val_short
 // LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst, align 2
@@ -531,7 +531,7 @@ void cmp_val_short(short* p, short x, short u) {
 }
 
 // CHECK-LABEL: @_Z12cmp_val_byte
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) align(1) : (!s8i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !s8i, {{.*}} : !s8i, success = seq_cst, failure = seq_cst) syncscope(system) align(1) : (!s8i, !cir.bool)
 
 // LLVM-LABEL: @_Z12cmp_val_byte
 // LLVM: cmpxchg ptr {{.*}}, i8 {{.*}}, i8 {{.*}} seq_cst seq_cst, align 1
@@ -601,7 +601,7 @@ void sub_uchar(unsigned char* a, char b) {
 // CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i
 // CHECK: %[[UPD_U:.*]] = cir.cast(integral, %[[UPD]] : !s32i), !u32i
 // CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP_U]] :
-// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) align(4) : (!u32i, !cir.bool)
+// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) syncscope(system) align(4) : (!u32i, !cir.bool)
 // CHECK: cir.store %[[RES]], {{.*}} : !cir.bool, !cir.ptr
 
 // LLVM-LABEL: @_Z13cmp_bool_uint
@@ -617,7 +617,7 @@ void cmp_bool_uint(unsigned int* p, int x, int u) {
 }
 
 // CHECK-LABEL: @_Z15cmp_bool_ushort
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) align(2) : (!u16i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) syncscope(system) align(2) : (!u16i, !cir.bool)
 
 // LLVM-LABEL: @_Z15cmp_bool_ushort
 // LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst, align 2
@@ -626,7 +626,7 @@ void cmp_bool_ushort(unsigned short* p, short x, short u) {
 }
 
 // CHECK-LABEL: @_Z14cmp_bool_ulong
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) align(8) : (!u64i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) syncscope(system) align(8) : (!u64i, !cir.bool)
 
 // LLVM-LABEL: @_Z14cmp_bool_ulong
 // LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8
@@ -641,7 +641,7 @@ void cmp_bool_ulong(unsigned long* p, long x, long u) {
 // CHECK: %[[UPD:.*]] = cir.load {{.*}} : !cir.ptr, !s32i
 // CHECK: %[[UPD_U:.*]] = cir.cast(integral, %[[UPD]] : !s32i), !u32i
 // CHECK: %[[OLD:.*]], %[[RES:.*]] = cir.atomic.cmp_xchg(%[[PTR]] : !cir.ptr, %[[CMP_U]] :
-// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) align(4) : (!u32i, !cir.bool)
+// CHECK-SAME: !u32i, %[[UPD_U]] : !u32i, success = seq_cst, failure = seq_cst) syncscope(system) align(4) : (!u32i, !cir.bool)
 // CHECK: %[[R:.*]] = cir.cast(integral, %[[OLD]] : !u32i), !s32i
 // CHECK: cir.store %[[R]], {{.*}} : !s32i, !cir.ptr
 
@@ -657,7 +657,7 @@ void cmp_val_uint(unsigned int* p, int x, int u) {
 }
 
 // CHECK-LABEL: @_Z14cmp_val_ushort
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) align(2) : (!u16i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u16i, {{.*}} : !u16i, success = seq_cst, failure = seq_cst) syncscope(system) align(2) : (!u16i, !cir.bool)
 
 // LLVM-LABEL: @_Z14cmp_val_ushort
 // LLVM: cmpxchg ptr {{.*}}, i16 {{.*}}, i16 {{.*}} seq_cst seq_cst, align 2
@@ -666,7 +666,7 @@ void cmp_val_ushort(unsigned short* p, short x, short u) {
 }
 
 // CHECK-LABEL: @_Z13cmp_val_ulong
-// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) align(8) : (!u64i, !cir.bool)
+// CHECK: cir.atomic.cmp_xchg({{.*}} : !cir.ptr, {{.*}} : !u64i, {{.*}} : !u64i, success = seq_cst, failure = seq_cst) syncscope(system) align(8) : (!u64i, !cir.bool)
 
 // LLVM-LABEL: @_Z13cmp_val_ulong
 // LLVM: cmpxchg ptr {{.*}}, i64 {{.*}}, i64 {{.*}} seq_cst seq_cst, align 8
diff --git a/clang/test/CIR/Lowering/syncscope.cir b/clang/test/CIR/Lowering/syncscope.cir
new file mode 100644
index 000000000000..8e1aad8a743c
--- /dev/null
+++ b/clang/test/CIR/Lowering/syncscope.cir
@@ -0,0 +1,12 @@
+// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o - | FileCheck %s -check-prefix=LLVM
+
+!s32i = !cir.int<s, 32>
+#fn_attr = #cir<extra({inline = #cir.inline<no>, nothrow = #cir.nothrow, optnone = #cir.optnone})>
+module {
+  cir.func @test(%ptr: !cir.ptr<!s32i>, %expected: !s32i, %desired: !s32i) -> !cir.bool extra(#fn_attr) {
+    %old, %cmp = cir.atomic.cmp_xchg(%ptr : !cir.ptr<!s32i>, %expected : !s32i, %desired : !s32i, success = acquire, failure = acquire) syncscope(single_thread) align(4) : (!s32i, !cir.bool)
+    cir.return %cmp: !cir.bool
+  }
+}
+
+// LLVM: {{%.*}} = cmpxchg ptr {{%.*}}, i32 {{%.*}}, i32 {{%.*}} syncscope("singlethread") acquire acquire, align 4
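
Note (not part of the patch): the LowerToLLVM.cpp hunk above calls getLLVMSyncScope(), which this diff does not define. A minimal sketch of one plausible mapping, assuming MemScope_SingleThread corresponds to LLVM's "singlethread" sync scope and MemScope_System to the default, whole-system scope (an empty scope string):

static llvm::StringRef getLLVMSyncScope(cir::MemScopeKind scope) {
  switch (scope) {
  case cir::MemScopeKind::MemScope_SingleThread:
    // Restricts ordering to the current thread only.
    return "singlethread";
  case cir::MemScopeKind::MemScope_System:
    // Empty scope string is LLVM's default: the whole system.
    return "";
  }
  llvm_unreachable("unknown memory scope");
}

With a mapping along these lines, syncscope(single_thread) in CIR lowers to syncscope("singlethread") on the LLVM cmpxchg, which is what the FileCheck line in syncscope.cir above expects; the actual helper in the tree may differ in naming or in how it treats the system scope.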