From 9f8a90b3ce0e248e3b68b056d4c840295facbc02 Mon Sep 17 00:00:00 2001
From: Dmitry Vyukov
Date: Fri, 9 Nov 2012 12:55:36 +0000
Subject: tsan: instrument all atomics (including fetch_add, exchange, cas, etc)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@167612 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Instrumentation/ThreadSanitizer.cpp |   84 +-
 test/Instrumentation/ThreadSanitizer/atomic.ll     | 1672 ++++++++++++++++++--
 2 files changed, 1644 insertions(+), 112 deletions(-)

diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index c6244a55c9..27514a5b72 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -97,6 +97,10 @@ struct ThreadSanitizer : public FunctionPass {
   Function *TsanWrite[kNumberOfAccessSizes];
   Function *TsanAtomicLoad[kNumberOfAccessSizes];
   Function *TsanAtomicStore[kNumberOfAccessSizes];
+  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
+  Function *TsanAtomicCAS[kNumberOfAccessSizes];
+  Function *TsanAtomicThreadFence;
+  Function *TsanAtomicSignalFence;
   Function *TsanVptrUpdate;
 };
 }  // namespace
@@ -167,10 +171,42 @@ bool ThreadSanitizer::doInitialization(Module &M) {
     TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
         AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
         NULL));
+
+    for (int op = AtomicRMWInst::FIRST_BINOP;
+        op <= AtomicRMWInst::LAST_BINOP; ++op) {
+      TsanAtomicRMW[op][i] = NULL;
+      const char *NamePart = NULL;
+      if (op == AtomicRMWInst::Xchg)
+        NamePart = "_exchange";
+      else if (op == AtomicRMWInst::Add)
+        NamePart = "_fetch_add";
+      else if (op == AtomicRMWInst::Sub)
+        NamePart = "_fetch_sub";
+      else if (op == AtomicRMWInst::And)
+        NamePart = "_fetch_and";
+      else if (op == AtomicRMWInst::Or)
+        NamePart = "_fetch_or";
+      else if (op == AtomicRMWInst::Xor)
+        NamePart = "_fetch_xor";
+      else
+        continue;
+      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
+      TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
+          RMWName, Ty, PtrTy, Ty, OrdTy, NULL));
+    }
+
+    SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
+                                  "_compare_exchange_val");
+    TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
+        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, NULL));
   }
   TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
       "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
       IRB.getInt8PtrTy(), NULL));
+  TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
+      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
+  TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
+      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
   return true;
 }
 
@@ -253,8 +289,8 @@ static bool isAtomic(Instruction *I) {
     return true;
   if (isa<AtomicCmpXchgInst>(I))
     return true;
-  if (FenceInst *FI = dyn_cast<FenceInst>(I))
-    return FI->getSynchScope() == CrossThread;
+  if (isa<FenceInst>(I))
+    return true;
   return false;
 }
 
@@ -397,12 +433,44 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
     CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
                                    ArrayRef<Value*>(Args));
     ReplaceInstWithInst(I, C);
-  } else if (isa<AtomicRMWInst>(I)) {
-    // FIXME: Not yet supported.
-  } else if (isa<AtomicCmpXchgInst>(I)) {
-    // FIXME: Not yet supported.
-  } else if (isa<FenceInst>(I)) {
-    // FIXME: Not yet supported.
+  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
+    Value *Addr = RMWI->getPointerOperand();
+    int Idx = getMemoryAccessFuncIndex(Addr);
+    if (Idx < 0)
+      return false;
+    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
+    if (F == NULL)
+      return false;
+    const size_t ByteSize = 1 << Idx;
+    const size_t BitSize = ByteSize * 8;
+    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
+    Type *PtrTy = Ty->getPointerTo();
+    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
+                     createOrdering(&IRB, RMWI->getOrdering())};
+    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
+    ReplaceInstWithInst(I, C);
+  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
+    Value *Addr = CASI->getPointerOperand();
+    int Idx = getMemoryAccessFuncIndex(Addr);
+    if (Idx < 0)
+      return false;
+    const size_t ByteSize = 1 << Idx;
+    const size_t BitSize = ByteSize * 8;
+    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
+    Type *PtrTy = Ty->getPointerTo();
+    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
+                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
+                     createOrdering(&IRB, CASI->getOrdering())};
+    CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
+    ReplaceInstWithInst(I, C);
+  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
+    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
+    Function *F = FI->getSynchScope() == SingleThread ?
+        TsanAtomicSignalFence : TsanAtomicThreadFence;
+    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
+    ReplaceInstWithInst(I, C);
   }
   return true;
 }
diff --git a/test/Instrumentation/ThreadSanitizer/atomic.ll b/test/Instrumentation/ThreadSanitizer/atomic.ll
index ed3c821205..672e47e1f2 100644
--- a/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ b/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -66,6 +66,286 @@ entry:
 ; CHECK: atomic8_store_seq_cst
 ; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 100532)
 
+define void @atomic8_xchg_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_xchg_monotonic
+; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 100501)
+
+define void @atomic8_add_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw add i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_add_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 100501)
+
+define void @atomic8_sub_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw sub i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_sub_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 100501)
+
+define void @atomic8_and_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw and i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_and_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 100501)
+
+define void @atomic8_or_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw or i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_or_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 100501)
+
+define void @atomic8_xor_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xor i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_xor_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 100501)
+
+define void @atomic8_xchg_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw xchg i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_xchg_acquire
+; CHECK: call i8 
@__tsan_atomic8_exchange(i8* %a, i8 0, i32 100504) + +define void @atomic8_add_acquire(i8* %a) nounwind uwtable { +entry: + atomicrmw add i8* %a, i8 0 acquire + ret void +} +; CHECK: atomic8_add_acquire +; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 100504) + +define void @atomic8_sub_acquire(i8* %a) nounwind uwtable { +entry: + atomicrmw sub i8* %a, i8 0 acquire + ret void +} +; CHECK: atomic8_sub_acquire +; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 100504) + +define void @atomic8_and_acquire(i8* %a) nounwind uwtable { +entry: + atomicrmw and i8* %a, i8 0 acquire + ret void +} +; CHECK: atomic8_and_acquire +; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 100504) + +define void @atomic8_or_acquire(i8* %a) nounwind uwtable { +entry: + atomicrmw or i8* %a, i8 0 acquire + ret void +} +; CHECK: atomic8_or_acquire +; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 100504) + +define void @atomic8_xor_acquire(i8* %a) nounwind uwtable { +entry: + atomicrmw xor i8* %a, i8 0 acquire + ret void +} +; CHECK: atomic8_xor_acquire +; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 100504) + +define void @atomic8_xchg_release(i8* %a) nounwind uwtable { +entry: + atomicrmw xchg i8* %a, i8 0 release + ret void +} +; CHECK: atomic8_xchg_release +; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 100508) + +define void @atomic8_add_release(i8* %a) nounwind uwtable { +entry: + atomicrmw add i8* %a, i8 0 release + ret void +} +; CHECK: atomic8_add_release +; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 100508) + +define void @atomic8_sub_release(i8* %a) nounwind uwtable { +entry: + atomicrmw sub i8* %a, i8 0 release + ret void +} +; CHECK: atomic8_sub_release +; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 100508) + +define void @atomic8_and_release(i8* %a) nounwind uwtable { +entry: + atomicrmw and i8* %a, i8 0 release + ret void +} +; CHECK: atomic8_and_release +; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 100508) + +define void @atomic8_or_release(i8* %a) nounwind uwtable { +entry: + atomicrmw or i8* %a, i8 0 release + ret void +} +; CHECK: atomic8_or_release +; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 100508) + +define void @atomic8_xor_release(i8* %a) nounwind uwtable { +entry: + atomicrmw xor i8* %a, i8 0 release + ret void +} +; CHECK: atomic8_xor_release +; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 100508) + +define void @atomic8_xchg_acq_rel(i8* %a) nounwind uwtable { +entry: + atomicrmw xchg i8* %a, i8 0 acq_rel + ret void +} +; CHECK: atomic8_xchg_acq_rel +; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 100516) + +define void @atomic8_add_acq_rel(i8* %a) nounwind uwtable { +entry: + atomicrmw add i8* %a, i8 0 acq_rel + ret void +} +; CHECK: atomic8_add_acq_rel +; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 100516) + +define void @atomic8_sub_acq_rel(i8* %a) nounwind uwtable { +entry: + atomicrmw sub i8* %a, i8 0 acq_rel + ret void +} +; CHECK: atomic8_sub_acq_rel +; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 100516) + +define void @atomic8_and_acq_rel(i8* %a) nounwind uwtable { +entry: + atomicrmw and i8* %a, i8 0 acq_rel + ret void +} +; CHECK: atomic8_and_acq_rel +; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 100516) + +define void @atomic8_or_acq_rel(i8* %a) nounwind uwtable { +entry: + atomicrmw or i8* %a, i8 0 acq_rel + ret void +} +; CHECK: atomic8_or_acq_rel +; CHECK: call i8 
@__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 100516) + +define void @atomic8_xor_acq_rel(i8* %a) nounwind uwtable { +entry: + atomicrmw xor i8* %a, i8 0 acq_rel + ret void +} +; CHECK: atomic8_xor_acq_rel +; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 100516) + +define void @atomic8_xchg_seq_cst(i8* %a) nounwind uwtable { +entry: + atomicrmw xchg i8* %a, i8 0 seq_cst + ret void +} +; CHECK: atomic8_xchg_seq_cst +; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 100532) + +define void @atomic8_add_seq_cst(i8* %a) nounwind uwtable { +entry: + atomicrmw add i8* %a, i8 0 seq_cst + ret void +} +; CHECK: atomic8_add_seq_cst +; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 100532) + +define void @atomic8_sub_seq_cst(i8* %a) nounwind uwtable { +entry: + atomicrmw sub i8* %a, i8 0 seq_cst + ret void +} +; CHECK: atomic8_sub_seq_cst +; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 100532) + +define void @atomic8_and_seq_cst(i8* %a) nounwind uwtable { +entry: + atomicrmw and i8* %a, i8 0 seq_cst + ret void +} +; CHECK: atomic8_and_seq_cst +; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 100532) + +define void @atomic8_or_seq_cst(i8* %a) nounwind uwtable { +entry: + atomicrmw or i8* %a, i8 0 seq_cst + ret void +} +; CHECK: atomic8_or_seq_cst +; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 100532) + +define void @atomic8_xor_seq_cst(i8* %a) nounwind uwtable { +entry: + atomicrmw xor i8* %a, i8 0 seq_cst + ret void +} +; CHECK: atomic8_xor_seq_cst +; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 100532) + +define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable { +entry: + cmpxchg i8* %a, i8 0, i8 1 monotonic + ret void +} +; CHECK: atomic8_cas_monotonic +; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 100501) + +define void @atomic8_cas_acquire(i8* %a) nounwind uwtable { +entry: + cmpxchg i8* %a, i8 0, i8 1 acquire + ret void +} +; CHECK: atomic8_cas_acquire +; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 100504) + +define void @atomic8_cas_release(i8* %a) nounwind uwtable { +entry: + cmpxchg i8* %a, i8 0, i8 1 release + ret void +} +; CHECK: atomic8_cas_release +; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 100508) + +define void @atomic8_cas_acq_rel(i8* %a) nounwind uwtable { +entry: + cmpxchg i8* %a, i8 0, i8 1 acq_rel + ret void +} +; CHECK: atomic8_cas_acq_rel +; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 100516) + +define void @atomic8_cas_seq_cst(i8* %a) nounwind uwtable { +entry: + cmpxchg i8* %a, i8 0, i8 1 seq_cst + ret void +} +; CHECK: atomic8_cas_seq_cst +; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 100532) + define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable { entry: %0 = load atomic i16* %a unordered, align 2 @@ -130,189 +410,1029 @@ entry: ; CHECK: atomic16_store_seq_cst ; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 100532) -define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable { +define void @atomic16_xchg_monotonic(i16* %a) nounwind uwtable { entry: - %0 = load atomic i32* %a unordered, align 4 - ret i32 %0 + atomicrmw xchg i16* %a, i16 0 monotonic + ret void } -; CHECK: atomic32_load_unordered -; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 100501) +; CHECK: atomic16_xchg_monotonic +; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 100501) -define i32 @atomic32_load_monotonic(i32* 
%a) nounwind uwtable { +define void @atomic16_add_monotonic(i16* %a) nounwind uwtable { entry: - %0 = load atomic i32* %a monotonic, align 4 - ret i32 %0 + atomicrmw add i16* %a, i16 0 monotonic + ret void } -; CHECK: atomic32_load_monotonic -; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 100501) +; CHECK: atomic16_add_monotonic +; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 100501) -define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable { +define void @atomic16_sub_monotonic(i16* %a) nounwind uwtable { entry: - %0 = load atomic i32* %a acquire, align 4 - ret i32 %0 + atomicrmw sub i16* %a, i16 0 monotonic + ret void } -; CHECK: atomic32_load_acquire -; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 100504) +; CHECK: atomic16_sub_monotonic +; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 100501) -define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable { +define void @atomic16_and_monotonic(i16* %a) nounwind uwtable { entry: - %0 = load atomic i32* %a seq_cst, align 4 - ret i32 %0 + atomicrmw and i16* %a, i16 0 monotonic + ret void } -; CHECK: atomic32_load_seq_cst -; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 100532) +; CHECK: atomic16_and_monotonic +; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 100501) -define void @atomic32_store_unordered(i32* %a) nounwind uwtable { +define void @atomic16_or_monotonic(i16* %a) nounwind uwtable { entry: - store atomic i32 0, i32* %a unordered, align 4 + atomicrmw or i16* %a, i16 0 monotonic ret void } -; CHECK: atomic32_store_unordered -; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 100501) +; CHECK: atomic16_or_monotonic +; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 100501) -define void @atomic32_store_monotonic(i32* %a) nounwind uwtable { +define void @atomic16_xor_monotonic(i16* %a) nounwind uwtable { entry: - store atomic i32 0, i32* %a monotonic, align 4 + atomicrmw xor i16* %a, i16 0 monotonic ret void } -; CHECK: atomic32_store_monotonic -; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 100501) +; CHECK: atomic16_xor_monotonic +; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 100501) -define void @atomic32_store_release(i32* %a) nounwind uwtable { +define void @atomic16_xchg_acquire(i16* %a) nounwind uwtable { entry: - store atomic i32 0, i32* %a release, align 4 + atomicrmw xchg i16* %a, i16 0 acquire ret void } -; CHECK: atomic32_store_release -; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 100508) +; CHECK: atomic16_xchg_acquire +; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 100504) -define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable { +define void @atomic16_add_acquire(i16* %a) nounwind uwtable { entry: - store atomic i32 0, i32* %a seq_cst, align 4 + atomicrmw add i16* %a, i16 0 acquire ret void } -; CHECK: atomic32_store_seq_cst -; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 100532) +; CHECK: atomic16_add_acquire +; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 100504) -define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable { +define void @atomic16_sub_acquire(i16* %a) nounwind uwtable { entry: - %0 = load atomic i64* %a unordered, align 8 - ret i64 %0 + atomicrmw sub i16* %a, i16 0 acquire + ret void } -; CHECK: atomic64_load_unordered -; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 100501) +; CHECK: atomic16_sub_acquire +; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 100504) -define i64 
@atomic64_load_monotonic(i64* %a) nounwind uwtable { +define void @atomic16_and_acquire(i16* %a) nounwind uwtable { entry: - %0 = load atomic i64* %a monotonic, align 8 - ret i64 %0 + atomicrmw and i16* %a, i16 0 acquire + ret void } -; CHECK: atomic64_load_monotonic -; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 100501) +; CHECK: atomic16_and_acquire +; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 100504) -define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable { +define void @atomic16_or_acquire(i16* %a) nounwind uwtable { entry: - %0 = load atomic i64* %a acquire, align 8 - ret i64 %0 + atomicrmw or i16* %a, i16 0 acquire + ret void } -; CHECK: atomic64_load_acquire -; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 100504) +; CHECK: atomic16_or_acquire +; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 100504) -define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable { +define void @atomic16_xor_acquire(i16* %a) nounwind uwtable { entry: - %0 = load atomic i64* %a seq_cst, align 8 - ret i64 %0 + atomicrmw xor i16* %a, i16 0 acquire + ret void } -; CHECK: atomic64_load_seq_cst -; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 100532) +; CHECK: atomic16_xor_acquire +; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 100504) -define void @atomic64_store_unordered(i64* %a) nounwind uwtable { +define void @atomic16_xchg_release(i16* %a) nounwind uwtable { entry: - store atomic i64 0, i64* %a unordered, align 8 + atomicrmw xchg i16* %a, i16 0 release ret void } -; CHECK: atomic64_store_unordered -; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 100501) +; CHECK: atomic16_xchg_release +; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 100508) -define void @atomic64_store_monotonic(i64* %a) nounwind uwtable { +define void @atomic16_add_release(i16* %a) nounwind uwtable { entry: - store atomic i64 0, i64* %a monotonic, align 8 + atomicrmw add i16* %a, i16 0 release ret void } -; CHECK: atomic64_store_monotonic -; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 100501) +; CHECK: atomic16_add_release +; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 100508) -define void @atomic64_store_release(i64* %a) nounwind uwtable { +define void @atomic16_sub_release(i16* %a) nounwind uwtable { entry: - store atomic i64 0, i64* %a release, align 8 + atomicrmw sub i16* %a, i16 0 release ret void } -; CHECK: atomic64_store_release -; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 100508) +; CHECK: atomic16_sub_release +; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 100508) -define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable { +define void @atomic16_and_release(i16* %a) nounwind uwtable { entry: - store atomic i64 0, i64* %a seq_cst, align 8 + atomicrmw and i16* %a, i16 0 release ret void } -; CHECK: atomic64_store_seq_cst -; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 100532) +; CHECK: atomic16_and_release +; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 100508) -define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable { +define void @atomic16_or_release(i16* %a) nounwind uwtable { entry: - %0 = load atomic i128* %a unordered, align 16 - ret i128 %0 + atomicrmw or i16* %a, i16 0 release + ret void } -; CHECK: atomic128_load_unordered -; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 100501) +; CHECK: atomic16_or_release +; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 100508) -define 
i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable { +define void @atomic16_xor_release(i16* %a) nounwind uwtable { entry: - %0 = load atomic i128* %a monotonic, align 16 - ret i128 %0 + atomicrmw xor i16* %a, i16 0 release + ret void } -; CHECK: atomic128_load_monotonic -; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 100501) +; CHECK: atomic16_xor_release +; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 100508) -define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable { +define void @atomic16_xchg_acq_rel(i16* %a) nounwind uwtable { entry: - %0 = load atomic i128* %a acquire, align 16 - ret i128 %0 + atomicrmw xchg i16* %a, i16 0 acq_rel + ret void } -; CHECK: atomic128_load_acquire -; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 100504) +; CHECK: atomic16_xchg_acq_rel +; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 100516) -define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable { +define void @atomic16_add_acq_rel(i16* %a) nounwind uwtable { entry: - %0 = load atomic i128* %a seq_cst, align 16 - ret i128 %0 + atomicrmw add i16* %a, i16 0 acq_rel + ret void } -; CHECK: atomic128_load_seq_cst -; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 100532) +; CHECK: atomic16_add_acq_rel +; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 100516) -define void @atomic128_store_unordered(i128* %a) nounwind uwtable { +define void @atomic16_sub_acq_rel(i16* %a) nounwind uwtable { entry: - store atomic i128 0, i128* %a unordered, align 16 + atomicrmw sub i16* %a, i16 0 acq_rel ret void } -; CHECK: atomic128_store_unordered -; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 100501) +; CHECK: atomic16_sub_acq_rel +; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 100516) -define void @atomic128_store_monotonic(i128* %a) nounwind uwtable { +define void @atomic16_and_acq_rel(i16* %a) nounwind uwtable { entry: - store atomic i128 0, i128* %a monotonic, align 16 + atomicrmw and i16* %a, i16 0 acq_rel ret void } -; CHECK: atomic128_store_monotonic -; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 100501) +; CHECK: atomic16_and_acq_rel +; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 100516) -define void @atomic128_store_release(i128* %a) nounwind uwtable { +define void @atomic16_or_acq_rel(i16* %a) nounwind uwtable { entry: - store atomic i128 0, i128* %a release, align 16 + atomicrmw or i16* %a, i16 0 acq_rel ret void } -; CHECK: atomic128_store_release -; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 100508) +; CHECK: atomic16_or_acq_rel +; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 100516) + +define void @atomic16_xor_acq_rel(i16* %a) nounwind uwtable { +entry: + atomicrmw xor i16* %a, i16 0 acq_rel + ret void +} +; CHECK: atomic16_xor_acq_rel +; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 100516) + +define void @atomic16_xchg_seq_cst(i16* %a) nounwind uwtable { +entry: + atomicrmw xchg i16* %a, i16 0 seq_cst + ret void +} +; CHECK: atomic16_xchg_seq_cst +; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 100532) + +define void @atomic16_add_seq_cst(i16* %a) nounwind uwtable { +entry: + atomicrmw add i16* %a, i16 0 seq_cst + ret void +} +; CHECK: atomic16_add_seq_cst +; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 100532) + +define void @atomic16_sub_seq_cst(i16* %a) nounwind uwtable { +entry: + atomicrmw sub i16* %a, i16 0 seq_cst + ret void +} +; CHECK: 
atomic16_sub_seq_cst +; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 100532) + +define void @atomic16_and_seq_cst(i16* %a) nounwind uwtable { +entry: + atomicrmw and i16* %a, i16 0 seq_cst + ret void +} +; CHECK: atomic16_and_seq_cst +; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 100532) + +define void @atomic16_or_seq_cst(i16* %a) nounwind uwtable { +entry: + atomicrmw or i16* %a, i16 0 seq_cst + ret void +} +; CHECK: atomic16_or_seq_cst +; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 100532) + +define void @atomic16_xor_seq_cst(i16* %a) nounwind uwtable { +entry: + atomicrmw xor i16* %a, i16 0 seq_cst + ret void +} +; CHECK: atomic16_xor_seq_cst +; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 100532) + +define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable { +entry: + cmpxchg i16* %a, i16 0, i16 1 monotonic + ret void +} +; CHECK: atomic16_cas_monotonic +; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 100501) + +define void @atomic16_cas_acquire(i16* %a) nounwind uwtable { +entry: + cmpxchg i16* %a, i16 0, i16 1 acquire + ret void +} +; CHECK: atomic16_cas_acquire +; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 100504) + +define void @atomic16_cas_release(i16* %a) nounwind uwtable { +entry: + cmpxchg i16* %a, i16 0, i16 1 release + ret void +} +; CHECK: atomic16_cas_release +; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 100508) + +define void @atomic16_cas_acq_rel(i16* %a) nounwind uwtable { +entry: + cmpxchg i16* %a, i16 0, i16 1 acq_rel + ret void +} +; CHECK: atomic16_cas_acq_rel +; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 100516) + +define void @atomic16_cas_seq_cst(i16* %a) nounwind uwtable { +entry: + cmpxchg i16* %a, i16 0, i16 1 seq_cst + ret void +} +; CHECK: atomic16_cas_seq_cst +; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 100532) + +define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable { +entry: + %0 = load atomic i32* %a unordered, align 4 + ret i32 %0 +} +; CHECK: atomic32_load_unordered +; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 100501) + +define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable { +entry: + %0 = load atomic i32* %a monotonic, align 4 + ret i32 %0 +} +; CHECK: atomic32_load_monotonic +; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 100501) + +define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable { +entry: + %0 = load atomic i32* %a acquire, align 4 + ret i32 %0 +} +; CHECK: atomic32_load_acquire +; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 100504) + +define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable { +entry: + %0 = load atomic i32* %a seq_cst, align 4 + ret i32 %0 +} +; CHECK: atomic32_load_seq_cst +; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 100532) + +define void @atomic32_store_unordered(i32* %a) nounwind uwtable { +entry: + store atomic i32 0, i32* %a unordered, align 4 + ret void +} +; CHECK: atomic32_store_unordered +; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 100501) + +define void @atomic32_store_monotonic(i32* %a) nounwind uwtable { +entry: + store atomic i32 0, i32* %a monotonic, align 4 + ret void +} +; CHECK: atomic32_store_monotonic +; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 100501) + +define void @atomic32_store_release(i32* %a) nounwind uwtable { +entry: + store atomic i32 0, i32* 
%a release, align 4 + ret void +} +; CHECK: atomic32_store_release +; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 100508) + +define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable { +entry: + store atomic i32 0, i32* %a seq_cst, align 4 + ret void +} +; CHECK: atomic32_store_seq_cst +; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 100532) + +define void @atomic32_xchg_monotonic(i32* %a) nounwind uwtable { +entry: + atomicrmw xchg i32* %a, i32 0 monotonic + ret void +} +; CHECK: atomic32_xchg_monotonic +; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 100501) + +define void @atomic32_add_monotonic(i32* %a) nounwind uwtable { +entry: + atomicrmw add i32* %a, i32 0 monotonic + ret void +} +; CHECK: atomic32_add_monotonic +; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 100501) + +define void @atomic32_sub_monotonic(i32* %a) nounwind uwtable { +entry: + atomicrmw sub i32* %a, i32 0 monotonic + ret void +} +; CHECK: atomic32_sub_monotonic +; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 100501) + +define void @atomic32_and_monotonic(i32* %a) nounwind uwtable { +entry: + atomicrmw and i32* %a, i32 0 monotonic + ret void +} +; CHECK: atomic32_and_monotonic +; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 100501) + +define void @atomic32_or_monotonic(i32* %a) nounwind uwtable { +entry: + atomicrmw or i32* %a, i32 0 monotonic + ret void +} +; CHECK: atomic32_or_monotonic +; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 100501) + +define void @atomic32_xor_monotonic(i32* %a) nounwind uwtable { +entry: + atomicrmw xor i32* %a, i32 0 monotonic + ret void +} +; CHECK: atomic32_xor_monotonic +; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 100501) + +define void @atomic32_xchg_acquire(i32* %a) nounwind uwtable { +entry: + atomicrmw xchg i32* %a, i32 0 acquire + ret void +} +; CHECK: atomic32_xchg_acquire +; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 100504) + +define void @atomic32_add_acquire(i32* %a) nounwind uwtable { +entry: + atomicrmw add i32* %a, i32 0 acquire + ret void +} +; CHECK: atomic32_add_acquire +; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 100504) + +define void @atomic32_sub_acquire(i32* %a) nounwind uwtable { +entry: + atomicrmw sub i32* %a, i32 0 acquire + ret void +} +; CHECK: atomic32_sub_acquire +; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 100504) + +define void @atomic32_and_acquire(i32* %a) nounwind uwtable { +entry: + atomicrmw and i32* %a, i32 0 acquire + ret void +} +; CHECK: atomic32_and_acquire +; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 100504) + +define void @atomic32_or_acquire(i32* %a) nounwind uwtable { +entry: + atomicrmw or i32* %a, i32 0 acquire + ret void +} +; CHECK: atomic32_or_acquire +; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 100504) + +define void @atomic32_xor_acquire(i32* %a) nounwind uwtable { +entry: + atomicrmw xor i32* %a, i32 0 acquire + ret void +} +; CHECK: atomic32_xor_acquire +; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 100504) + +define void @atomic32_xchg_release(i32* %a) nounwind uwtable { +entry: + atomicrmw xchg i32* %a, i32 0 release + ret void +} +; CHECK: atomic32_xchg_release +; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 100508) + +define void @atomic32_add_release(i32* %a) nounwind uwtable { +entry: + atomicrmw add i32* %a, i32 0 release + ret void +} +; CHECK: 
atomic32_add_release +; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 100508) + +define void @atomic32_sub_release(i32* %a) nounwind uwtable { +entry: + atomicrmw sub i32* %a, i32 0 release + ret void +} +; CHECK: atomic32_sub_release +; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 100508) + +define void @atomic32_and_release(i32* %a) nounwind uwtable { +entry: + atomicrmw and i32* %a, i32 0 release + ret void +} +; CHECK: atomic32_and_release +; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 100508) + +define void @atomic32_or_release(i32* %a) nounwind uwtable { +entry: + atomicrmw or i32* %a, i32 0 release + ret void +} +; CHECK: atomic32_or_release +; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 100508) + +define void @atomic32_xor_release(i32* %a) nounwind uwtable { +entry: + atomicrmw xor i32* %a, i32 0 release + ret void +} +; CHECK: atomic32_xor_release +; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 100508) + +define void @atomic32_xchg_acq_rel(i32* %a) nounwind uwtable { +entry: + atomicrmw xchg i32* %a, i32 0 acq_rel + ret void +} +; CHECK: atomic32_xchg_acq_rel +; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 100516) + +define void @atomic32_add_acq_rel(i32* %a) nounwind uwtable { +entry: + atomicrmw add i32* %a, i32 0 acq_rel + ret void +} +; CHECK: atomic32_add_acq_rel +; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 100516) + +define void @atomic32_sub_acq_rel(i32* %a) nounwind uwtable { +entry: + atomicrmw sub i32* %a, i32 0 acq_rel + ret void +} +; CHECK: atomic32_sub_acq_rel +; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 100516) + +define void @atomic32_and_acq_rel(i32* %a) nounwind uwtable { +entry: + atomicrmw and i32* %a, i32 0 acq_rel + ret void +} +; CHECK: atomic32_and_acq_rel +; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 100516) + +define void @atomic32_or_acq_rel(i32* %a) nounwind uwtable { +entry: + atomicrmw or i32* %a, i32 0 acq_rel + ret void +} +; CHECK: atomic32_or_acq_rel +; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 100516) + +define void @atomic32_xor_acq_rel(i32* %a) nounwind uwtable { +entry: + atomicrmw xor i32* %a, i32 0 acq_rel + ret void +} +; CHECK: atomic32_xor_acq_rel +; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 100516) + +define void @atomic32_xchg_seq_cst(i32* %a) nounwind uwtable { +entry: + atomicrmw xchg i32* %a, i32 0 seq_cst + ret void +} +; CHECK: atomic32_xchg_seq_cst +; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 100532) + +define void @atomic32_add_seq_cst(i32* %a) nounwind uwtable { +entry: + atomicrmw add i32* %a, i32 0 seq_cst + ret void +} +; CHECK: atomic32_add_seq_cst +; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 100532) + +define void @atomic32_sub_seq_cst(i32* %a) nounwind uwtable { +entry: + atomicrmw sub i32* %a, i32 0 seq_cst + ret void +} +; CHECK: atomic32_sub_seq_cst +; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 100532) + +define void @atomic32_and_seq_cst(i32* %a) nounwind uwtable { +entry: + atomicrmw and i32* %a, i32 0 seq_cst + ret void +} +; CHECK: atomic32_and_seq_cst +; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 100532) + +define void @atomic32_or_seq_cst(i32* %a) nounwind uwtable { +entry: + atomicrmw or i32* %a, i32 0 seq_cst + ret void +} +; CHECK: atomic32_or_seq_cst +; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 100532) + 
+define void @atomic32_xor_seq_cst(i32* %a) nounwind uwtable { +entry: + atomicrmw xor i32* %a, i32 0 seq_cst + ret void +} +; CHECK: atomic32_xor_seq_cst +; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 100532) + +define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable { +entry: + cmpxchg i32* %a, i32 0, i32 1 monotonic + ret void +} +; CHECK: atomic32_cas_monotonic +; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 100501) + +define void @atomic32_cas_acquire(i32* %a) nounwind uwtable { +entry: + cmpxchg i32* %a, i32 0, i32 1 acquire + ret void +} +; CHECK: atomic32_cas_acquire +; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 100504) + +define void @atomic32_cas_release(i32* %a) nounwind uwtable { +entry: + cmpxchg i32* %a, i32 0, i32 1 release + ret void +} +; CHECK: atomic32_cas_release +; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 100508) + +define void @atomic32_cas_acq_rel(i32* %a) nounwind uwtable { +entry: + cmpxchg i32* %a, i32 0, i32 1 acq_rel + ret void +} +; CHECK: atomic32_cas_acq_rel +; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 100516) + +define void @atomic32_cas_seq_cst(i32* %a) nounwind uwtable { +entry: + cmpxchg i32* %a, i32 0, i32 1 seq_cst + ret void +} +; CHECK: atomic32_cas_seq_cst +; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 100532) + +define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable { +entry: + %0 = load atomic i64* %a unordered, align 8 + ret i64 %0 +} +; CHECK: atomic64_load_unordered +; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 100501) + +define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable { +entry: + %0 = load atomic i64* %a monotonic, align 8 + ret i64 %0 +} +; CHECK: atomic64_load_monotonic +; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 100501) + +define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable { +entry: + %0 = load atomic i64* %a acquire, align 8 + ret i64 %0 +} +; CHECK: atomic64_load_acquire +; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 100504) + +define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable { +entry: + %0 = load atomic i64* %a seq_cst, align 8 + ret i64 %0 +} +; CHECK: atomic64_load_seq_cst +; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 100532) + +define void @atomic64_store_unordered(i64* %a) nounwind uwtable { +entry: + store atomic i64 0, i64* %a unordered, align 8 + ret void +} +; CHECK: atomic64_store_unordered +; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 100501) + +define void @atomic64_store_monotonic(i64* %a) nounwind uwtable { +entry: + store atomic i64 0, i64* %a monotonic, align 8 + ret void +} +; CHECK: atomic64_store_monotonic +; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 100501) + +define void @atomic64_store_release(i64* %a) nounwind uwtable { +entry: + store atomic i64 0, i64* %a release, align 8 + ret void +} +; CHECK: atomic64_store_release +; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 100508) + +define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable { +entry: + store atomic i64 0, i64* %a seq_cst, align 8 + ret void +} +; CHECK: atomic64_store_seq_cst +; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 100532) + +define void @atomic64_xchg_monotonic(i64* %a) nounwind uwtable { +entry: + atomicrmw xchg i64* %a, i64 0 monotonic + ret void +} +; CHECK: atomic64_xchg_monotonic +; CHECK: 
call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 100501) + +define void @atomic64_add_monotonic(i64* %a) nounwind uwtable { +entry: + atomicrmw add i64* %a, i64 0 monotonic + ret void +} +; CHECK: atomic64_add_monotonic +; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 100501) + +define void @atomic64_sub_monotonic(i64* %a) nounwind uwtable { +entry: + atomicrmw sub i64* %a, i64 0 monotonic + ret void +} +; CHECK: atomic64_sub_monotonic +; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 100501) + +define void @atomic64_and_monotonic(i64* %a) nounwind uwtable { +entry: + atomicrmw and i64* %a, i64 0 monotonic + ret void +} +; CHECK: atomic64_and_monotonic +; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 100501) + +define void @atomic64_or_monotonic(i64* %a) nounwind uwtable { +entry: + atomicrmw or i64* %a, i64 0 monotonic + ret void +} +; CHECK: atomic64_or_monotonic +; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 100501) + +define void @atomic64_xor_monotonic(i64* %a) nounwind uwtable { +entry: + atomicrmw xor i64* %a, i64 0 monotonic + ret void +} +; CHECK: atomic64_xor_monotonic +; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 100501) + +define void @atomic64_xchg_acquire(i64* %a) nounwind uwtable { +entry: + atomicrmw xchg i64* %a, i64 0 acquire + ret void +} +; CHECK: atomic64_xchg_acquire +; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 100504) + +define void @atomic64_add_acquire(i64* %a) nounwind uwtable { +entry: + atomicrmw add i64* %a, i64 0 acquire + ret void +} +; CHECK: atomic64_add_acquire +; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 100504) + +define void @atomic64_sub_acquire(i64* %a) nounwind uwtable { +entry: + atomicrmw sub i64* %a, i64 0 acquire + ret void +} +; CHECK: atomic64_sub_acquire +; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 100504) + +define void @atomic64_and_acquire(i64* %a) nounwind uwtable { +entry: + atomicrmw and i64* %a, i64 0 acquire + ret void +} +; CHECK: atomic64_and_acquire +; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 100504) + +define void @atomic64_or_acquire(i64* %a) nounwind uwtable { +entry: + atomicrmw or i64* %a, i64 0 acquire + ret void +} +; CHECK: atomic64_or_acquire +; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 100504) + +define void @atomic64_xor_acquire(i64* %a) nounwind uwtable { +entry: + atomicrmw xor i64* %a, i64 0 acquire + ret void +} +; CHECK: atomic64_xor_acquire +; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 100504) + +define void @atomic64_xchg_release(i64* %a) nounwind uwtable { +entry: + atomicrmw xchg i64* %a, i64 0 release + ret void +} +; CHECK: atomic64_xchg_release +; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 100508) + +define void @atomic64_add_release(i64* %a) nounwind uwtable { +entry: + atomicrmw add i64* %a, i64 0 release + ret void +} +; CHECK: atomic64_add_release +; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 100508) + +define void @atomic64_sub_release(i64* %a) nounwind uwtable { +entry: + atomicrmw sub i64* %a, i64 0 release + ret void +} +; CHECK: atomic64_sub_release +; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 100508) + +define void @atomic64_and_release(i64* %a) nounwind uwtable { +entry: + atomicrmw and i64* %a, i64 0 release + ret void +} +; CHECK: atomic64_and_release +; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 100508) 
+ +define void @atomic64_or_release(i64* %a) nounwind uwtable { +entry: + atomicrmw or i64* %a, i64 0 release + ret void +} +; CHECK: atomic64_or_release +; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 100508) + +define void @atomic64_xor_release(i64* %a) nounwind uwtable { +entry: + atomicrmw xor i64* %a, i64 0 release + ret void +} +; CHECK: atomic64_xor_release +; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 100508) + +define void @atomic64_xchg_acq_rel(i64* %a) nounwind uwtable { +entry: + atomicrmw xchg i64* %a, i64 0 acq_rel + ret void +} +; CHECK: atomic64_xchg_acq_rel +; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 100516) + +define void @atomic64_add_acq_rel(i64* %a) nounwind uwtable { +entry: + atomicrmw add i64* %a, i64 0 acq_rel + ret void +} +; CHECK: atomic64_add_acq_rel +; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 100516) + +define void @atomic64_sub_acq_rel(i64* %a) nounwind uwtable { +entry: + atomicrmw sub i64* %a, i64 0 acq_rel + ret void +} +; CHECK: atomic64_sub_acq_rel +; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 100516) + +define void @atomic64_and_acq_rel(i64* %a) nounwind uwtable { +entry: + atomicrmw and i64* %a, i64 0 acq_rel + ret void +} +; CHECK: atomic64_and_acq_rel +; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 100516) + +define void @atomic64_or_acq_rel(i64* %a) nounwind uwtable { +entry: + atomicrmw or i64* %a, i64 0 acq_rel + ret void +} +; CHECK: atomic64_or_acq_rel +; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 100516) + +define void @atomic64_xor_acq_rel(i64* %a) nounwind uwtable { +entry: + atomicrmw xor i64* %a, i64 0 acq_rel + ret void +} +; CHECK: atomic64_xor_acq_rel +; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 100516) + +define void @atomic64_xchg_seq_cst(i64* %a) nounwind uwtable { +entry: + atomicrmw xchg i64* %a, i64 0 seq_cst + ret void +} +; CHECK: atomic64_xchg_seq_cst +; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 100532) + +define void @atomic64_add_seq_cst(i64* %a) nounwind uwtable { +entry: + atomicrmw add i64* %a, i64 0 seq_cst + ret void +} +; CHECK: atomic64_add_seq_cst +; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 100532) + +define void @atomic64_sub_seq_cst(i64* %a) nounwind uwtable { +entry: + atomicrmw sub i64* %a, i64 0 seq_cst + ret void +} +; CHECK: atomic64_sub_seq_cst +; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 100532) + +define void @atomic64_and_seq_cst(i64* %a) nounwind uwtable { +entry: + atomicrmw and i64* %a, i64 0 seq_cst + ret void +} +; CHECK: atomic64_and_seq_cst +; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 100532) + +define void @atomic64_or_seq_cst(i64* %a) nounwind uwtable { +entry: + atomicrmw or i64* %a, i64 0 seq_cst + ret void +} +; CHECK: atomic64_or_seq_cst +; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 100532) + +define void @atomic64_xor_seq_cst(i64* %a) nounwind uwtable { +entry: + atomicrmw xor i64* %a, i64 0 seq_cst + ret void +} +; CHECK: atomic64_xor_seq_cst +; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 100532) + +define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable { +entry: + cmpxchg i64* %a, i64 0, i64 1 monotonic + ret void +} +; CHECK: atomic64_cas_monotonic +; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 100501) + +define void @atomic64_cas_acquire(i64* %a) nounwind uwtable { +entry: 
+ cmpxchg i64* %a, i64 0, i64 1 acquire + ret void +} +; CHECK: atomic64_cas_acquire +; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 100504) + +define void @atomic64_cas_release(i64* %a) nounwind uwtable { +entry: + cmpxchg i64* %a, i64 0, i64 1 release + ret void +} +; CHECK: atomic64_cas_release +; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 100508) + +define void @atomic64_cas_acq_rel(i64* %a) nounwind uwtable { +entry: + cmpxchg i64* %a, i64 0, i64 1 acq_rel + ret void +} +; CHECK: atomic64_cas_acq_rel +; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 100516) + +define void @atomic64_cas_seq_cst(i64* %a) nounwind uwtable { +entry: + cmpxchg i64* %a, i64 0, i64 1 seq_cst + ret void +} +; CHECK: atomic64_cas_seq_cst +; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 100532) + +define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable { +entry: + %0 = load atomic i128* %a unordered, align 16 + ret i128 %0 +} +; CHECK: atomic128_load_unordered +; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 100501) + +define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable { +entry: + %0 = load atomic i128* %a monotonic, align 16 + ret i128 %0 +} +; CHECK: atomic128_load_monotonic +; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 100501) + +define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable { +entry: + %0 = load atomic i128* %a acquire, align 16 + ret i128 %0 +} +; CHECK: atomic128_load_acquire +; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 100504) + +define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable { +entry: + %0 = load atomic i128* %a seq_cst, align 16 + ret i128 %0 +} +; CHECK: atomic128_load_seq_cst +; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 100532) + +define void @atomic128_store_unordered(i128* %a) nounwind uwtable { +entry: + store atomic i128 0, i128* %a unordered, align 16 + ret void +} +; CHECK: atomic128_store_unordered +; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 100501) + +define void @atomic128_store_monotonic(i128* %a) nounwind uwtable { +entry: + store atomic i128 0, i128* %a monotonic, align 16 + ret void +} +; CHECK: atomic128_store_monotonic +; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 100501) + +define void @atomic128_store_release(i128* %a) nounwind uwtable { +entry: + store atomic i128 0, i128* %a release, align 16 + ret void +} +; CHECK: atomic128_store_release +; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 100508) define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable { entry: @@ -321,3 +1441,347 @@ entry: } ; CHECK: atomic128_store_seq_cst ; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 100532) + +define void @atomic128_xchg_monotonic(i128* %a) nounwind uwtable { +entry: + atomicrmw xchg i128* %a, i128 0 monotonic + ret void +} +; CHECK: atomic128_xchg_monotonic +; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 100501) + +define void @atomic128_add_monotonic(i128* %a) nounwind uwtable { +entry: + atomicrmw add i128* %a, i128 0 monotonic + ret void +} +; CHECK: atomic128_add_monotonic +; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 100501) + +define void @atomic128_sub_monotonic(i128* %a) nounwind uwtable { +entry: + atomicrmw sub i128* %a, i128 0 monotonic + ret void +} +; CHECK: atomic128_sub_monotonic +; CHECK: call i128 
@__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 100501) + +define void @atomic128_and_monotonic(i128* %a) nounwind uwtable { +entry: + atomicrmw and i128* %a, i128 0 monotonic + ret void +} +; CHECK: atomic128_and_monotonic +; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 100501) + +define void @atomic128_or_monotonic(i128* %a) nounwind uwtable { +entry: + atomicrmw or i128* %a, i128 0 monotonic + ret void +} +; CHECK: atomic128_or_monotonic +; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 100501) + +define void @atomic128_xor_monotonic(i128* %a) nounwind uwtable { +entry: + atomicrmw xor i128* %a, i128 0 monotonic + ret void +} +; CHECK: atomic128_xor_monotonic +; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 100501) + +define void @atomic128_xchg_acquire(i128* %a) nounwind uwtable { +entry: + atomicrmw xchg i128* %a, i128 0 acquire + ret void +} +; CHECK: atomic128_xchg_acquire +; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 100504) + +define void @atomic128_add_acquire(i128* %a) nounwind uwtable { +entry: + atomicrmw add i128* %a, i128 0 acquire + ret void +} +; CHECK: atomic128_add_acquire +; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 100504) + +define void @atomic128_sub_acquire(i128* %a) nounwind uwtable { +entry: + atomicrmw sub i128* %a, i128 0 acquire + ret void +} +; CHECK: atomic128_sub_acquire +; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 100504) + +define void @atomic128_and_acquire(i128* %a) nounwind uwtable { +entry: + atomicrmw and i128* %a, i128 0 acquire + ret void +} +; CHECK: atomic128_and_acquire +; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 100504) + +define void @atomic128_or_acquire(i128* %a) nounwind uwtable { +entry: + atomicrmw or i128* %a, i128 0 acquire + ret void +} +; CHECK: atomic128_or_acquire +; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 100504) + +define void @atomic128_xor_acquire(i128* %a) nounwind uwtable { +entry: + atomicrmw xor i128* %a, i128 0 acquire + ret void +} +; CHECK: atomic128_xor_acquire +; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 100504) + +define void @atomic128_xchg_release(i128* %a) nounwind uwtable { +entry: + atomicrmw xchg i128* %a, i128 0 release + ret void +} +; CHECK: atomic128_xchg_release +; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 100508) + +define void @atomic128_add_release(i128* %a) nounwind uwtable { +entry: + atomicrmw add i128* %a, i128 0 release + ret void +} +; CHECK: atomic128_add_release +; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 100508) + +define void @atomic128_sub_release(i128* %a) nounwind uwtable { +entry: + atomicrmw sub i128* %a, i128 0 release + ret void +} +; CHECK: atomic128_sub_release +; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 100508) + +define void @atomic128_and_release(i128* %a) nounwind uwtable { +entry: + atomicrmw and i128* %a, i128 0 release + ret void +} +; CHECK: atomic128_and_release +; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 100508) + +define void @atomic128_or_release(i128* %a) nounwind uwtable { +entry: + atomicrmw or i128* %a, i128 0 release + ret void +} +; CHECK: atomic128_or_release +; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 100508) + +define void @atomic128_xor_release(i128* %a) nounwind uwtable { +entry: + atomicrmw xor i128* %a, i128 0 release + ret void 
+} +; CHECK: atomic128_xor_release +; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 100508) + +define void @atomic128_xchg_acq_rel(i128* %a) nounwind uwtable { +entry: + atomicrmw xchg i128* %a, i128 0 acq_rel + ret void +} +; CHECK: atomic128_xchg_acq_rel +; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 100516) + +define void @atomic128_add_acq_rel(i128* %a) nounwind uwtable { +entry: + atomicrmw add i128* %a, i128 0 acq_rel + ret void +} +; CHECK: atomic128_add_acq_rel +; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 100516) + +define void @atomic128_sub_acq_rel(i128* %a) nounwind uwtable { +entry: + atomicrmw sub i128* %a, i128 0 acq_rel + ret void +} +; CHECK: atomic128_sub_acq_rel +; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 100516) + +define void @atomic128_and_acq_rel(i128* %a) nounwind uwtable { +entry: + atomicrmw and i128* %a, i128 0 acq_rel + ret void +} +; CHECK: atomic128_and_acq_rel +; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 100516) + +define void @atomic128_or_acq_rel(i128* %a) nounwind uwtable { +entry: + atomicrmw or i128* %a, i128 0 acq_rel + ret void +} +; CHECK: atomic128_or_acq_rel +; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 100516) + +define void @atomic128_xor_acq_rel(i128* %a) nounwind uwtable { +entry: + atomicrmw xor i128* %a, i128 0 acq_rel + ret void +} +; CHECK: atomic128_xor_acq_rel +; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 100516) + +define void @atomic128_xchg_seq_cst(i128* %a) nounwind uwtable { +entry: + atomicrmw xchg i128* %a, i128 0 seq_cst + ret void +} +; CHECK: atomic128_xchg_seq_cst +; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 100532) + +define void @atomic128_add_seq_cst(i128* %a) nounwind uwtable { +entry: + atomicrmw add i128* %a, i128 0 seq_cst + ret void +} +; CHECK: atomic128_add_seq_cst +; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 100532) + +define void @atomic128_sub_seq_cst(i128* %a) nounwind uwtable { +entry: + atomicrmw sub i128* %a, i128 0 seq_cst + ret void +} +; CHECK: atomic128_sub_seq_cst +; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 100532) + +define void @atomic128_and_seq_cst(i128* %a) nounwind uwtable { +entry: + atomicrmw and i128* %a, i128 0 seq_cst + ret void +} +; CHECK: atomic128_and_seq_cst +; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 100532) + +define void @atomic128_or_seq_cst(i128* %a) nounwind uwtable { +entry: + atomicrmw or i128* %a, i128 0 seq_cst + ret void +} +; CHECK: atomic128_or_seq_cst +; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 100532) + +define void @atomic128_xor_seq_cst(i128* %a) nounwind uwtable { +entry: + atomicrmw xor i128* %a, i128 0 seq_cst + ret void +} +; CHECK: atomic128_xor_seq_cst +; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 100532) + +define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable { +entry: + cmpxchg i128* %a, i128 0, i128 1 monotonic + ret void +} +; CHECK: atomic128_cas_monotonic +; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 100501) + +define void @atomic128_cas_acquire(i128* %a) nounwind uwtable { +entry: + cmpxchg i128* %a, i128 0, i128 1 acquire + ret void +} +; CHECK: atomic128_cas_acquire +; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 100504) + +define void 
@atomic128_cas_release(i128* %a) nounwind uwtable { +entry: + cmpxchg i128* %a, i128 0, i128 1 release + ret void +} +; CHECK: atomic128_cas_release +; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 100508) + +define void @atomic128_cas_acq_rel(i128* %a) nounwind uwtable { +entry: + cmpxchg i128* %a, i128 0, i128 1 acq_rel + ret void +} +; CHECK: atomic128_cas_acq_rel +; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 100516) + +define void @atomic128_cas_seq_cst(i128* %a) nounwind uwtable { +entry: + cmpxchg i128* %a, i128 0, i128 1 seq_cst + ret void +} +; CHECK: atomic128_cas_seq_cst +; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 100532) + +define void @atomic_signal_fence_acquire() nounwind uwtable { +entry: + fence singlethread acquire + ret void +} +; CHECK: atomic_signal_fence_acquire +; CHECK: call void @__tsan_atomic_signal_fence(i32 100504) + +define void @atomic_thread_fence_acquire() nounwind uwtable { +entry: + fence acquire + ret void +} +; CHECK: atomic_thread_fence_acquire +; CHECK: call void @__tsan_atomic_thread_fence(i32 100504) + +define void @atomic_signal_fence_release() nounwind uwtable { +entry: + fence singlethread release + ret void +} +; CHECK: atomic_signal_fence_release +; CHECK: call void @__tsan_atomic_signal_fence(i32 100508) + +define void @atomic_thread_fence_release() nounwind uwtable { +entry: + fence release + ret void +} +; CHECK: atomic_thread_fence_release +; CHECK: call void @__tsan_atomic_thread_fence(i32 100508) + +define void @atomic_signal_fence_acq_rel() nounwind uwtable { +entry: + fence singlethread acq_rel + ret void +} +; CHECK: atomic_signal_fence_acq_rel +; CHECK: call void @__tsan_atomic_signal_fence(i32 100516) + +define void @atomic_thread_fence_acq_rel() nounwind uwtable { +entry: + fence acq_rel + ret void +} +; CHECK: atomic_thread_fence_acq_rel +; CHECK: call void @__tsan_atomic_thread_fence(i32 100516) + +define void @atomic_signal_fence_seq_cst() nounwind uwtable { +entry: + fence singlethread seq_cst + ret void +} +; CHECK: atomic_signal_fence_seq_cst +; CHECK: call void @__tsan_atomic_signal_fence(i32 100532) + +define void @atomic_thread_fence_seq_cst() nounwind uwtable { +entry: + fence seq_cst + ret void +} +; CHECK: atomic_thread_fence_seq_cst +; CHECK: call void @__tsan_atomic_thread_fence(i32 100532) -- cgit v1.2.3
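
Reference note: the pass changes above only create declarations for the __tsan_atomic* callbacks (via getOrInsertFunction) and rewrite atomic instructions into calls to them; the definitions are expected to be supplied by the TSan runtime. The C++ sketch below illustrates the entry points implied by those declarations, using the 8-bit and 32-bit sizes as examples. The typedef names, the volatile qualifiers, and the exact return conventions are illustrative assumptions, not part of this patch; only the function names and the (pointer, value(s), i32 ordering) argument shapes come from the code above.

// Sketch of the runtime interface the instrumented calls resolve to.
// Shapes mirror the getOrInsertFunction() declarations in doInitialization;
// the real definitions are expected to live in the TSan runtime.
#include <stdint.h>

typedef int32_t  morder;   // ordering argument (the i32 OrdTy in the pass)
typedef uint8_t  a8;
typedef uint32_t a32;

extern "C" {
  // atomicrmw <op> becomes __tsan_atomicN_<op>(ptr, value, order);
  // presumably returns the previous value, like the IR instruction.
  a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
  a8  __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
  // cmpxchg becomes __tsan_atomicN_compare_exchange_val(ptr, expected, desired, order).
  a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, morder mo);
  // fence becomes a thread fence; fence singlethread becomes a signal fence.
  void __tsan_atomic_thread_fence(morder mo);
  void __tsan_atomic_signal_fence(morder mo);
}

The ordering values in the CHECK lines (100501, 100504, 100508, 100516, 100532 for monotonic, acquire, release, acq_rel and seq_cst) are what createOrdering() passes for the i32 argument: 100500 plus a distinct flag per ordering, presumably so that a mismatch between instrumentation and runtime is easy to spot.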