From f6d6d2b0f7ca39f7b64137c4c90241af7099ebe8 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 12 Jun 2017 10:01:27 +0000
Subject: [PATCH] [X86][SSE] Change memop fragment to inherit from vec128load
 with local alignment controls

First possible step towards merging SSE/AVX memory folding pattern
fragments.

Also allows us to remove the duplicate non-temporal load logic.

Differential Revision: https://reviews.llvm.org/D33902

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@305184 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrFragmentsSIMD.td | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 5224a16613c..c28b35b2297 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -737,19 +737,15 @@ def alignedloadv8f64 : PatFrag<(ops node:$ptr),
 def alignedloadv8i64 : PatFrag<(ops node:$ptr),
                                (v8i64 (alignedload512 node:$ptr))>;
 
-// Like 'load', but uses special alignment checks suitable for use in
+// Like 'vec128load', but uses special alignment checks suitable for use in
 // memory operands in most SSE instructions, which are required to
 // be naturally aligned on some targets but not on others. If the subtarget
 // allows unaligned accesses, match any load, though this may require
 // setting a feature bit in the processor (on startup, for example).
 // Opteron 10h and later implement such a feature.
-// Avoid non-temporal aligned loads on supported targets.
-def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return (Subtarget->hasSSEUnalignedMem() ||
-          cast<LoadSDNode>(N)->getAlignment() >= 16) &&
-         (!Subtarget->hasSSE41() ||
-          !(cast<LoadSDNode>(N)->getAlignment() >= 16 &&
-            cast<LoadSDNode>(N)->isNonTemporal()));
+def memop : PatFrag<(ops node:$ptr), (vec128load node:$ptr), [{
+  return Subtarget->hasSSEUnalignedMem() ||
+         cast<LoadSDNode>(N)->getAlignment() >= 16;
 }]>;
 
 // 128-bit memop pattern fragments
-- 
2.40.0
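
For context: the vec128load fragment that memop now inherits from is defined
elsewhere in X86InstrFragmentsSIMD.td and is not shown in this diff. Per the
commit message, it already carries the non-temporal check deleted above. A
plausible sketch, mirroring the removed predicate (an assumption, not part of
this patch):

    // Assumed sketch of vec128load (not in this diff): reject aligned
    // non-temporal loads on SSE4.1+ targets, where MOVNTDQA should be used
    // instead. This is why memop can drop its duplicate check.
    def vec128load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
      return !Subtarget->hasSSE41() ||
             !(cast<LoadSDNode>(N)->getAlignment() >= 16 &&
               cast<LoadSDNode>(N)->isNonTemporal());
    }]>;

With that check handled by the parent fragment, the new memop predicate only
needs the alignment half: match any 128-bit vector load when the subtarget
tolerates unaligned SSE memory operands, otherwise require 16-byte alignment.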