From: Craig Topper
Date: Sat, 26 Nov 2016 02:13:58 +0000 (+0000)
Subject: [X86] Add SSE, AVX, and AVX2 version of MOVDQU to the load/store folding tables for...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=7f41923e8b37ceec00dc6fbad0d9da72933b9432;p=llvm

[X86] Add SSE, AVX, and AVX2 versions of MOVDQU to the load/store folding
tables for consistency.

I'm not sure this is truly needed, but we had the floating-point equivalents,
the aligned equivalents, and the EVEX equivalents, so this just makes it
complete.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@287960 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 43b82e54462..c66fcfd7aed 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -335,6 +335,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::MOVAPDrr,    X86::MOVAPDmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
     { X86::MOVAPSrr,    X86::MOVAPSmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
     { X86::MOVDQArr,    X86::MOVDQAmr,      TB_FOLDED_STORE | TB_ALIGN_16 },
+    { X86::MOVDQUrr,    X86::MOVDQUmr,      TB_FOLDED_STORE },
     { X86::MOVPDI2DIrr, X86::MOVPDI2DImr,   TB_FOLDED_STORE },
     { X86::MOVPQIto64rr,X86::MOVPQI2QImr,   TB_FOLDED_STORE },
     { X86::MOVSDto64rr, X86::MOVSDto64mr,   TB_FOLDED_STORE },
@@ -380,6 +381,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VMOVAPDrr,   X86::VMOVAPDmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
     { X86::VMOVAPSrr,   X86::VMOVAPSmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
     { X86::VMOVDQArr,   X86::VMOVDQAmr,     TB_FOLDED_STORE | TB_ALIGN_16 },
+    { X86::VMOVDQUrr,   X86::VMOVDQUmr,     TB_FOLDED_STORE },
     { X86::VMOVPDI2DIrr,X86::VMOVPDI2DImr,  TB_FOLDED_STORE },
     { X86::VMOVPQIto64rr, X86::VMOVPQI2QImr,TB_FOLDED_STORE },
     { X86::VMOVSDto64rr,X86::VMOVSDto64mr,  TB_FOLDED_STORE },
@@ -394,6 +396,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VMOVAPDYrr,  X86::VMOVAPDYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
     { X86::VMOVAPSYrr,  X86::VMOVAPSYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
     { X86::VMOVDQAYrr,  X86::VMOVDQAYmr,    TB_FOLDED_STORE | TB_ALIGN_32 },
+    { X86::VMOVDQUYrr,  X86::VMOVDQUYmr,    TB_FOLDED_STORE },
     { X86::VMOVUPDYrr,  X86::VMOVUPDYmr,    TB_FOLDED_STORE },
     { X86::VMOVUPSYrr,  X86::VMOVUPSYmr,    TB_FOLDED_STORE },
@@ -540,6 +543,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm,   0 },
     { X86::MOVDI2SSrr,  X86::MOVDI2SSrm,    0 },
     { X86::MOVDQArr,    X86::MOVDQArm,      TB_ALIGN_16 },
+    { X86::MOVDQUrr,    X86::MOVDQUrm,      0 },
     { X86::MOVSHDUPrr,  X86::MOVSHDUPrm,    TB_ALIGN_16 },
     { X86::MOVSLDUPrr,  X86::MOVSLDUPrm,    TB_ALIGN_16 },
     { X86::MOVSX16rr8,  X86::MOVSX16rm8,    0 },
@@ -655,6 +659,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VMOVDI2PDIrr, X86::VMOVDI2PDIrm, 0 },
     { X86::VMOVDI2SSrr, X86::VMOVDI2SSrm,   0 },
     { X86::VMOVDQArr,   X86::VMOVDQArm,     TB_ALIGN_16 },
+    { X86::VMOVDQUrr,   X86::VMOVDQUrm,     0 },
     { X86::VMOVSLDUPrr, X86::VMOVSLDUPrm,   0 },
     { X86::VMOVSHDUPrr, X86::VMOVSHDUPrm,   0 },
     { X86::VMOVUPDrr,   X86::VMOVUPDrm,     0 },
@@ -710,6 +715,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VMOVAPSYrr,  X86::VMOVAPSYrm,    TB_ALIGN_32 },
     { X86::VMOVDDUPYrr, X86::VMOVDDUPYrm,   0 },
     { X86::VMOVDQAYrr,  X86::VMOVDQAYrm,    TB_ALIGN_32 },
+    { X86::VMOVDQUYrr,  X86::VMOVDQUYrm,    0 },
     { X86::VMOVSLDUPYrr, X86::VMOVSLDUPYrm, 0 },
     { X86::VMOVSHDUPYrr, X86::VMOVSHDUPYrm, 0 },
     { X86::VMOVUPDYrr,  X86::VMOVUPDYrm,    0 },
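
Note on what these tables mean: each { RegOp, MemOp, Flags } triple tells the
folding logic that the register-form opcode in the first column may be
rewritten into the memory-form opcode in the second column (for example, when
a spilled value can be read from or written to its stack slot directly),
subject to the TB_* flags. TB_FOLDED_STORE marks entries used when folding a
store, and TB_ALIGN_16 / TB_ALIGN_32 require the memory operand to be suitably
aligned, which is why the unaligned MOVDQU entries added here carry no
alignment flag while the MOVDQA entries do. Below is a minimal, self-contained
C++ sketch of such a lookup; the opcode values and the lookupStoreFold helper
are invented for illustration and are not LLVM's actual API.

// Simplified sketch (not LLVM's real code) of querying a register->memory
// fold table like the one extended in this patch. The { RegOp, MemOp, Flags }
// layout and the TB_* flag meanings mirror the entries above; the opcode
// numbers and the lookup helper are hypothetical.
#include <cstdint>
#include <cstdio>

enum : uint16_t {
  TB_FOLDED_LOAD  = 1 << 0,  // entry can fold a load into the instruction
  TB_FOLDED_STORE = 1 << 1,  // entry can fold a store into the instruction
  TB_ALIGN_16     = 1 << 2,  // memory operand must be 16-byte aligned
  TB_ALIGN_32     = 1 << 3,  // memory operand must be 32-byte aligned
};

struct MemoryFoldTableEntry {
  unsigned RegOp;   // register-to-register form, e.g. X86::MOVDQUrr
  unsigned MemOp;   // memory form it can be folded to, e.g. X86::MOVDQUmr
  uint16_t Flags;   // TB_* constraints that gate the fold
};

// Hypothetical opcode numbers purely so the example is runnable.
enum HypotheticalOpcode : unsigned { MOVDQArr, MOVDQAmr, MOVDQUrr, MOVDQUmr };

static const MemoryFoldTableEntry StoreFoldTable[] = {
  { MOVDQArr, MOVDQAmr, TB_FOLDED_STORE | TB_ALIGN_16 }, // aligned form
  { MOVDQUrr, MOVDQUmr, TB_FOLDED_STORE },               // unaligned form (this patch)
};

// Return the memory opcode if RegOp can be store-folded given the alignment
// of the memory operand, or -1 if there is no usable entry.
int lookupStoreFold(unsigned RegOp, unsigned MemOperandAlign) {
  for (const MemoryFoldTableEntry &E : StoreFoldTable) {
    if (E.RegOp != RegOp || !(E.Flags & TB_FOLDED_STORE))
      continue;
    if ((E.Flags & TB_ALIGN_16) && MemOperandAlign < 16)
      return -1;
    if ((E.Flags & TB_ALIGN_32) && MemOperandAlign < 32)
      return -1;
    return static_cast<int>(E.MemOp);
  }
  return -1;
}

int main() {
  // MOVDQA requires a 16-byte aligned slot; MOVDQU folds regardless of alignment.
  std::printf("MOVDQArr @ align 8 -> %d\n", lookupStoreFold(MOVDQArr, 8)); // -1
  std::printf("MOVDQUrr @ align 8 -> %d\n", lookupStoreFold(MOVDQUrr, 8)); // MOVDQUmr
  return 0;
}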