From: David Majnemer Date: Fri, 22 Jan 2016 16:36:44 +0000 (+0000) Subject: [MSVC Compat] Don't provide /volatile:ms semantics to types > pointer X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=a3f4011422a41159591ae6ace0af7031c87f2bdc;p=clang [MSVC Compat] Don't provide /volatile:ms semantics to types > pointer Volatile loads of type wider than a pointer get split by MSVC because the base x86 ISA doesn't provide loads which are wider than pointer width. LLVM assumes that it can emit a cmpxchg8b but this is problematic if the memory is in a CONST memory segment. Instead, provide behavior compatible with MSVC: split loads wider than a pointer. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@258506 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp index 1ef5d1035a..4e52c3630c 100644 --- a/lib/CodeGen/CGAtomic.cpp +++ b/lib/CodeGen/CGAtomic.cpp @@ -1295,10 +1295,23 @@ bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { /// performing such an operation can be performed without a libcall. bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty, bool IsVolatile) const { + // The operation must be volatile for us to make it atomic. + if (!IsVolatile) + return false; + // The -fms-volatile flag must be passed for us to adopt this behavior. + if (!CGM.getCodeGenOpts().MSVolatile) + return false; + // An atomic is inline if we don't need to use a libcall (e.g. it is builtin). - bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic( - getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty)); - return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline; + if (!getContext().getTargetInfo().hasBuiltinAtomic( + getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty))) + return false; + + // MSVC doesn't seem to do this for types wider than a pointer. 
+ if (getContext().getTypeSize(Ty) > + getContext().getTypeSize(getContext().getIntPtrType())) + return false; + return true; } RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL, diff --git a/test/CodeGen/ms-volatile.c b/test/CodeGen/ms-volatile.c index 87393e794f..242ce067d6 100644 --- a/test/CodeGen/ms-volatile.c +++ b/test/CodeGen/ms-volatile.c @@ -52,11 +52,23 @@ void test7(volatile struct bar *p, volatile struct bar *q) { void test8(volatile double *p, volatile double *q) { *p = *q; // CHECK-LABEL: @test8 - // CHECK: load atomic volatile {{.*}} acquire - // CHECK: store atomic volatile {{.*}}, {{.*}} release + // CHECK: load volatile {{.*}} + // CHECK: store volatile {{.*}}, {{.*}} } void test9(volatile baz *p, baz *q) { *p = *q; // CHECK-LABEL: @test9 // CHECK: store atomic volatile {{.*}}, {{.*}} release } +void test10(volatile long long *p, volatile long long *q) { + *p = *q; + // CHECK-LABEL: @test10 + // CHECK: load volatile {{.*}} + // CHECK: store volatile {{.*}}, {{.*}} +} +void test11(volatile float *p, volatile float *q) { + *p = *q; + // CHECK-LABEL: @test11 + // CHECK: load atomic volatile {{.*}} acquire + // CHECK: store atomic volatile {{.*}}, {{.*}} release +}