From: Craig Topper <craig.topper@gmail.com> Date: Mon, 20 Feb 2017 00:37:23 +0000 (+0000) Subject: [X86] Use memory form of shift right by 1 when the rotl immediate is one less than... X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=ffea086747062e215e79d35795bf9eb6945e140e;p=llvm [X86] Use memory form of shift right by 1 when the rotl immediate is one less than the operation size. An earlier commit already did this for the register form. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@295626 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/X86/X86InstrShiftRotate.td b/lib/Target/X86/X86InstrShiftRotate.td index b7b77e73b43..8291ba0dc39 100644 --- a/lib/Target/X86/X86InstrShiftRotate.td +++ b/lib/Target/X86/X86InstrShiftRotate.td @@ -662,19 +662,19 @@ def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, u8imm:$src), // Rotate by 1 def ROR8m1 : I<0xD0, MRM1m, (outs), (ins i8mem :$dst), "ror{b}\t$dst", - [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)], + [(store (rotl (loadi8 addr:$dst), (i8 7)), addr:$dst)], IIC_SR>; def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst), "ror{w}\t$dst", - [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)], + [(store (rotl (loadi16 addr:$dst), (i8 15)), addr:$dst)], IIC_SR>, OpSize16; def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst), "ror{l}\t$dst", - [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)], + [(store (rotl (loadi32 addr:$dst), (i8 31)), addr:$dst)], IIC_SR>, OpSize32; def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst), "ror{q}\t$dst", - [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)], + [(store (rotl (loadi64 addr:$dst), (i8 63)), addr:$dst)], IIC_SR>; } // SchedRW diff --git a/test/CodeGen/X86/rotate.ll b/test/CodeGen/X86/rotate.ll index 307f2f72f0c..5d5150ad62d 100644 --- a/test/CodeGen/X86/rotate.ll +++ b/test/CodeGen/X86/rotate.ll @@ -558,7 +558,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind { ; 64-LABEL: rotr1_64_mem: ; 64: # 
BB#0:
-; 64-NEXT: rolq $63, (%rdi)
+; 64-NEXT: rorq (%rdi)
; 64-NEXT: retq
%A = load i64, i64 *%Aptr
%B = shl i64 %A, 63
@@ -572,12 +572,12 @@ define void @rotr1_32_mem(i32* %Aptr) nounwind {
; 32-LABEL: rotr1_32_mem:
; 32: # BB#0:
; 32-NEXT: movl 4(%esp), %eax
-; 32-NEXT: roll $31, (%eax)
+; 32-NEXT: rorl (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_32_mem:
; 64: # BB#0:
-; 64-NEXT: roll $31, (%rdi)
+; 64-NEXT: rorl (%rdi)
; 64-NEXT: retq
%A = load i32, i32 *%Aptr
%B = shl i32 %A, 31
@@ -591,12 +591,12 @@ define void @rotr1_16_mem(i16* %Aptr) nounwind {
; 32-LABEL: rotr1_16_mem:
; 32: # BB#0:
; 32-NEXT: movl 4(%esp), %eax
-; 32-NEXT: rolw $15, (%eax)
+; 32-NEXT: rorw (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_16_mem:
; 64: # BB#0:
-; 64-NEXT: rolw $15, (%rdi)
+; 64-NEXT: rorw (%rdi)
; 64-NEXT: retq
%A = load i16, i16 *%Aptr
%B = shl i16 %A, 15
@@ -610,12 +610,12 @@ define void @rotr1_8_mem(i8* %Aptr) nounwind {
; 32-LABEL: rotr1_8_mem:
; 32: # BB#0:
; 32-NEXT: movl 4(%esp), %eax
-; 32-NEXT: rolb $7, (%eax)
+; 32-NEXT: rorb (%eax)
; 32-NEXT: retl
;
; 64-LABEL: rotr1_8_mem:
; 64: # BB#0:
-; 64-NEXT: rolb $7, (%rdi)
+; 64-NEXT: rorb (%rdi)
; 64-NEXT: retq
%A = load i8, i8 *%Aptr
%B = shl i8 %A, 7