Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1708,7 +1708,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
llvm_unreachable("BI__sync_sub_and_fetch like NYI");
return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Sub, E,
cir::BinOpKind::Sub);

case Builtin::BI__sync_and_and_fetch_1:
case Builtin::BI__sync_and_and_fetch_2:
Expand Down
77 changes: 77 additions & 0 deletions clang/test/CIR/CodeGen/atomic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -739,4 +739,81 @@ extern "C" void test_op_and_fetch (void)
// LLVM: [[RET7:%.*]] = add i64 [[RES7]], [[CONV7]]
// LLVM: store i64 [[RET7]], ptr @ull, align 8
ull = __sync_add_and_fetch (&ull, uc);

// CHECK: [[VAL0:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s8i
// CHECK: [[RES0:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
// CHECK: [[RET0:%.*]] = cir.binop(sub, [[RES0]], [[VAL0]]) : !s8i
// LLVM: [[VAL0:%.*]] = load i8, ptr @uc, align 1
// LLVM: [[RES0:%.*]] = atomicrmw sub ptr @sc, i8 [[VAL0]] seq_cst, align 1
// LLVM: [[RET0:%.*]] = sub i8 [[RES0]], [[VAL0]]
// LLVM: store i8 [[RET0]], ptr @sc, align 1
sc = __sync_sub_and_fetch (&sc, uc);

// CHECK: [[RES1:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
// CHECK: [[RET1:%.*]] = cir.binop(sub, [[RES1]], [[VAL1]]) : !u8i
// LLVM: [[VAL1:%.*]] = load i8, ptr @uc, align 1
// LLVM: [[RES1:%.*]] = atomicrmw sub ptr @uc, i8 [[VAL1]] seq_cst, align 1
// LLVM: [[RET1:%.*]] = sub i8 [[RES1]], [[VAL1]]
// LLVM: store i8 [[RET1]], ptr @uc, align 1
uc = __sync_sub_and_fetch (&uc, uc);

// CHECK: [[VAL2:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s16i
// CHECK: [[RES2:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
// CHECK: [[RET2:%.*]] = cir.binop(sub, [[RES2]], [[VAL2]]) : !s16i
// LLVM: [[VAL2:%.*]] = load i8, ptr @uc, align 1
// LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
// LLVM: [[RES2:%.*]] = atomicrmw sub ptr @ss, i16 [[CONV2]] seq_cst, align 2
// LLVM: [[RET2:%.*]] = sub i16 [[RES2]], [[CONV2]]
// LLVM: store i16 [[RET2]], ptr @ss, align 2
ss = __sync_sub_and_fetch (&ss, uc);

// CHECK: [[VAL3:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u16i
// CHECK: [[RES3:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
// CHECK: [[RET3:%.*]] = cir.binop(sub, [[RES3]], [[VAL3]]) : !u16i
// LLVM: [[VAL3:%.*]] = load i8, ptr @uc, align 1
// LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
// LLVM: [[RES3:%.*]] = atomicrmw sub ptr @us, i16 [[CONV3]] seq_cst, align 2
// LLVM: [[RET3:%.*]] = sub i16 [[RES3]], [[CONV3]]
// LLVM: store i16 [[RET3]], ptr @us, align 2
us = __sync_sub_and_fetch (&us, uc);

// CHECK: [[VAL4:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s32i
// CHECK: [[RES4:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
// CHECK: [[RET4:%.*]] = cir.binop(sub, [[RES4]], [[VAL4]]) : !s32i
// LLVM: [[VAL4:%.*]] = load i8, ptr @uc, align 1
// LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
// LLVM: [[RES4:%.*]] = atomicrmw sub ptr @si, i32 [[CONV4]] seq_cst, align 4
// LLVM: [[RET4:%.*]] = sub i32 [[RES4]], [[CONV4]]
// LLVM: store i32 [[RET4]], ptr @si, align 4
si = __sync_sub_and_fetch (&si, uc);

// CHECK: [[VAL5:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u32i
// CHECK: [[RES5:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
// CHECK: [[RET5:%.*]] = cir.binop(sub, [[RES5]], [[VAL5]]) : !u32i
// LLVM: [[VAL5:%.*]] = load i8, ptr @uc, align 1
// LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
// LLVM: [[RES5:%.*]] = atomicrmw sub ptr @ui, i32 [[CONV5]] seq_cst, align 4
// LLVM: [[RET5:%.*]] = sub i32 [[RES5]], [[CONV5]]
// LLVM: store i32 [[RET5]], ptr @ui, align 4
ui = __sync_sub_and_fetch (&ui, uc);

// CHECK: [[VAL6:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !s64i
// CHECK: [[RES6:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
// CHECK: [[RET6:%.*]] = cir.binop(sub, [[RES6]], [[VAL6]]) : !s64i
// LLVM: [[VAL6:%.*]] = load i8, ptr @uc, align 1
// LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
// LLVM: [[RES6:%.*]] = atomicrmw sub ptr @sll, i64 [[CONV6]] seq_cst, align 8
// LLVM: [[RET6:%.*]] = sub i64 [[RES6]], [[CONV6]]
// LLVM: store i64 [[RET6]], ptr @sll, align 8
sll = __sync_sub_and_fetch (&sll, uc);

// CHECK: [[VAL7:%.*]] = cir.cast(integral, {{%.*}} : !u8i), !u64i
// CHECK: [[RES7:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
// CHECK: [[RET7:%.*]] = cir.binop(sub, [[RES7]], [[VAL7]]) : !u64i
// LLVM: [[VAL7:%.*]] = load i8, ptr @uc, align 1
// LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
// LLVM: [[RES7:%.*]] = atomicrmw sub ptr @ull, i64 [[CONV7]] seq_cst, align 8
// LLVM: [[RET7:%.*]] = sub i64 [[RES7]], [[CONV7]]
// LLVM: store i64 [[RET7]], ptr @ull, align 8
ull = __sync_sub_and_fetch (&ull, uc);
}