From: Chandler Carruth
Date: Sun, 18 Jul 2010 20:54:12 +0000 (+0000)
Subject: Fix a goof in my previous patch -- not all of the builtins return a value, some
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=8d13d221cf7c1657404c611efaadf3ac19d899b3;p=clang

Fix a goof in my previous patch -- not all of the builtins return a value, some
fixed return types.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@108657 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index bedea18203..f1e501651e 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -419,6 +419,10 @@ Sema::SemaBuiltinAtomicOverloaded(OwningExprResult TheCallResult) {
     return ExprError();
   }
 
+  // The majority of builtins return a value, but a few have special return
+  // types, so allow them to override appropriately below.
+  QualType ResultType = ValType;
+
   // We need to figure out which concrete builtin this maps onto. For example,
   // __sync_fetch_and_add with a 2 byte object turns into
   // __sync_fetch_and_add_2.
@@ -487,11 +491,13 @@ Sema::SemaBuiltinAtomicOverloaded(OwningExprResult TheCallResult) {
   case Builtin::BI__sync_bool_compare_and_swap:
     BuiltinIndex = 11;
     NumFixed = 2;
+    ResultType = Context.BoolTy;
     break;
   case Builtin::BI__sync_lock_test_and_set: BuiltinIndex = 12; break;
   case Builtin::BI__sync_lock_release:
     BuiltinIndex = 13;
     NumFixed = 0;
+    ResultType = Context.VoidTy;
     break;
   }
 
@@ -558,7 +564,7 @@ Sema::SemaBuiltinAtomicOverloaded(OwningExprResult TheCallResult) {
   // Change the result type of the call to match the original value type. This
   // is arbitrary, but the codegen for these builtins ins design to handle it
   // gracefully.
-  TheCall->setType(ValType);
+  TheCall->setType(ResultType);
 
   return move(TheCallResult);
 }
diff --git a/test/CodeGen/atomic.c b/test/CodeGen/atomic.c
index 8b66bfd660..d0a7e04eaa 100644
--- a/test/CodeGen/atomic.c
+++ b/test/CodeGen/atomic.c
@@ -1,5 +1,5 @@
 // RUN: %clang_cc1 %s -emit-llvm -o - -triple=i686-apple-darwin9 > %t1
-// RUN: grep @llvm.memory.barrier %t1 | count 40
+// RUN: grep @llvm.memory.barrier %t1 | count 42
 // RUN: grep @llvm.atomic.load.add.i32 %t1 | count 3
 // RUN: grep @llvm.atomic.load.sub.i8 %t1 | count 2
 // RUN: grep @llvm.atomic.load.min.i32 %t1
@@ -7,7 +7,7 @@
 // RUN: grep @llvm.atomic.load.umin.i32 %t1
 // RUN: grep @llvm.atomic.load.umax.i32 %t1
 // RUN: grep @llvm.atomic.swap.i32 %t1
-// RUN: grep @llvm.atomic.cmp.swap.i32 %t1 | count 4
+// RUN: grep @llvm.atomic.cmp.swap.i32 %t1 | count 5
 // RUN: grep @llvm.atomic.load.and.i32 %t1
 // RUN: grep @llvm.atomic.load.or.i8 %t1
 // RUN: grep @llvm.atomic.load.xor.i8 %t1
@@ -47,10 +47,15 @@ int atomic(void)
   if ( __sync_val_compare_and_swap(&valb, 0, 1)) {
     old = 42;
   }
-  
+  __sync_bool_compare_and_swap((void **)0, (void *)0, (void *)0);
 
   __sync_lock_release(&val);
   __sync_synchronize ();
 
   return old;
 }
+
+void release_return(int *lock) {
+  // Ensure this is actually returning void all the way through.
+  return __sync_lock_release(lock);
+}
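
For reference, a minimal standalone C sketch (not part of the commit) of the
user-visible typing this Sema change pins down: __sync_bool_compare_and_swap
evaluates to a boolean success flag rather than a value of the operand's type,
and __sync_lock_release evaluates to void, so its result can no longer be used
as a value. The names "guard" and "swapped" are purely illustrative.

  #include <stdio.h>

  static int guard;

  int main(void) {
    /* Yields a boolean success flag, not a value of the operand's type. */
    int swapped = __sync_bool_compare_and_swap(&guard, 0, 1);
    printf("swapped: %d\n", swapped);   /* prints 1 on the first call */

    /* Typed as void, so e.g. "int v = __sync_lock_release(&guard);"
       would be rejected. */
    __sync_lock_release(&guard);
    return 0;
  }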