From: Craig Topper
Date: Tue, 19 Feb 2019 20:12:20 +0000 (+0000)
Subject: [X86] Don't consider functions ABI compatible for ArgumentPromotion pass if they...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=82fbd87152f08ccf6d60dce38c0ba566e497f429;p=llvm

[X86] Don't consider functions ABI compatible for ArgumentPromotion pass if they view 512-bit vectors differently.

The use of the -mprefer-vector-width=256 command line option mixed with
functions using vector intrinsics can create situations where one function
considers 512-bit vectors legal, but another function does not.

If a 512-bit vector is passed between them via a pointer, it's possible
ArgumentPromotion will try to pass it by value instead. Type legalization
would then handle the 512-bit vector differently in the two functions,
leading to runtime failures. Had the 512-bit vector been passed by value
from clang codegen, both functions would have been tagged with a
"min-legal-vector-width"="512" function attribute and would have been
legalized the same way.

I observed this issue in 32-bit mode, where a union containing a 512-bit
vector was passed from a function that used intrinsics to one that did not.
The caller ended up passing the value in zmm0 and the callee tried to read
it from ymm0 and ymm1.

The fix implemented here is simply to consider the functions mismatched if
they would handle 512-bit vectors differently, without looking at the types
actually being passed. This is the easiest and safest fix, but it can be
refined in the future.

Differential Revision: https://reviews.llvm.org/D58390

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@354376 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index a7ecfc2e586..57dcef20896 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -3070,6 +3070,22 @@ bool X86TTIImpl::areInlineCompatible(const Function *Caller,
   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
 }
 
+bool X86TTIImpl::areFunctionArgsABICompatible(
+    const Function *Caller, const Function *Callee,
+    SmallPtrSetImpl<Argument *> &Args) const {
+  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
+    return false;
+
+  // If we get here, we know the target features match. If one function
+  // considers 512-bit vectors legal and the other does not, consider them
+  // incompatible.
+  // FIXME Look at the arguments and only consider 512 bit or larger vectors?
+  const TargetMachine &TM = getTLI()->getTargetMachine();
+
+  return TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
+         TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs();
+}
+
 const X86TTIImpl::TTI::MemCmpExpansionOptions *
 X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
   // Only enable vector loads for equality comparison.
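To make the failure mode concrete, here is a minimal C++ sketch of the kind of
source that can trigger it; the names are hypothetical and not taken from the
original reproducer. Built with -mavx512vl -mprefer-vector-width=256, the
explicit intrinsic leaves 512-bit registers legal in FillAndCopy, while
CopyOut is legalized with 256-bit registers only:

    #include <immintrin.h>

    // A union whose largest member is a 512-bit vector, giving it a
    // 64-byte payload.
    union Vec512 {
      __m512i Zmm;
      long long Words[8];
    };

    // Callee: no intrinsics, so under -mprefer-vector-width=256 it is
    // legalized with 256-bit (ymm) registers only.
    static void CopyOut(const Vec512 *Src, Vec512 *Dst) {
      *Dst = *Src;
    }

    // Caller: the explicit 512-bit intrinsic makes zmm registers legal in
    // this function's body.
    void FillAndCopy(Vec512 *Dst) {
      Vec512 Tmp;
      Tmp.Zmm = _mm512_set1_epi64(1);
      // Src is passed by pointer here. If ArgumentPromotion rewrites it into
      // a by-value 512-bit argument, the caller passes it in zmm0 while the
      // callee reads it from ymm0 and ymm1, as described above.
      CopyOut(&Tmp, Dst);
    }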
diff --git a/lib/Target/X86/X86TargetTransformInfo.h b/lib/Target/X86/X86TargetTransformInfo.h
index 5035818fde9..36a941da66d 100644
--- a/lib/Target/X86/X86TargetTransformInfo.h
+++ b/lib/Target/X86/X86TargetTransformInfo.h
@@ -189,6 +189,9 @@ public:
   bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
   bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
+  bool areFunctionArgsABICompatible(const Function *Caller,
+                                    const Function *Callee,
+                                    SmallPtrSetImpl<Argument *> &Args) const;
   const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
       bool IsZeroCmp) const;
   bool enableInterleavedAccessVectorization();
diff --git a/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll b/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll
new file mode 100644
index 00000000000..59ac7d3c195
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll
@@ -0,0 +1,184 @@
+; RUN: opt -S -argpromotion < %s | FileCheck %s
+; RUN: opt -S -passes=argpromotion < %s | FileCheck %s
+; Test that we only promote arguments when the caller/callee have compatible
+; function attributes.
+
+target triple = "x86_64-unknown-linux-gnu"
+
+; This should promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #0 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg) #0 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg) #1 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg) #0 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #0 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg) #1 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should not promote
+; CHECK-LABEL: @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1)
+define internal fastcc void @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg) #2 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should not promote
+; CHECK-LABEL: @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1)
+define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #2 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %arg) #1 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #3 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg) #4 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; This should promote
+; CHECK-LABEL: @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg, <8 x i64> %arg1.val)
+define internal fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #4 {
+bb:
+  %tmp = load <8 x i64>, <8 x i64>* %arg1
+  store <8 x i64> %tmp, <8 x i64>* %arg
+  ret void
+}
+
+define void @avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg) #3 {
+bb:
+  %tmp = alloca <8 x i64>, align 32
+  %tmp2 = alloca <8 x i64>, align 32
+  %tmp3 = bitcast <8 x i64>* %tmp to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
+  call fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
+  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
+  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #5
+
+attributes #0 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="512" "prefer-vector-width"="512" }
+attributes #1 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="512" "prefer-vector-width"="256" }
+attributes #2 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="256" "prefer-vector-width"="256" }
+attributes #3 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2" "min-legal-vector-width"="512" "prefer-vector-width"="256" }
+attributes #4 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2" "min-legal-vector-width"="256" "prefer-vector-width"="256" }
+attributes #5 = { argmemonly nounwind }
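
As a reading aid for the test's expectations, the subtarget query that the new
hook compares can be paraphrased for these attribute sets roughly as follows.
This is an illustrative approximation of useAVX512Regs(), not code from this
patch:

    // Approximation of useAVX512Regs() for the attribute combinations in the
    // test above (assumes +avx512vl, where VLX is available).
    static bool Uses512BitRegs(bool HasAVX512, unsigned MinLegalVectorWidth,
                               unsigned PreferVectorWidth) {
      if (!HasAVX512)
        return false; // the +avx2 functions (#3, #4) never use zmm registers
      return PreferVectorWidth >= 512 || MinLegalVectorWidth > 256;
    }

Under that reading, every "should promote" pair agrees on the result (true for
both #0/#1 functions, false for both #3/#4 functions), while the two "should
not promote" pairs mix #1 (true) with #2 (false), which is exactly the
mismatch the new areFunctionArgsABICompatible override rejects.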