From: Benjamin Kramer
Date: Tue, 26 Sep 2017 10:25:27 +0000 (+0000)
Subject: Revert "[X86] Make all the NOREX CodeGenOnly instructions into postRA pseudos like...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=074df004cb13e5539d10819d40ebe85e2741706d;p=llvm

Revert "[X86] Make all the NOREX CodeGenOnly instructions into postRA pseudos like the NOREX version of TEST."

Makes llc crash. This reverts commit r314151.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@314199 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrExtension.td b/lib/Target/X86/X86InstrExtension.td
index 83fbea2dd1c..af43d9f5332 100644
--- a/lib/Target/X86/X86InstrExtension.td
+++ b/lib/Target/X86/X86InstrExtension.td
@@ -94,22 +94,26 @@ def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
 // These are the same as the regular MOVZX32rr8 and MOVZX32rm8
 // except that they use GR32_NOREX for the output operand register class
 // instead of GR32. This allows them to operate on h registers on x86-64.
-let hasSideEffects = 0, isPseudo = 1 in {
-def MOVZX32_NOREXrr8 : I<0, Pseudo,
+let hasSideEffects = 0, isCodeGenOnly = 1 in {
+def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
                          (outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
-                         "", [], IIC_MOVZX>, Sched<[WriteALU]>;
+                         "movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
+                         [], IIC_MOVZX>, TB, OpSize32, Sched<[WriteALU]>;
 let mayLoad = 1 in
-def MOVZX32_NOREXrm8 : I<0, Pseudo,
+def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
                          (outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src),
-                         "", [], IIC_MOVZX>, Sched<[WriteALULd]>;
+                         "movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
+                         [], IIC_MOVZX>, TB, OpSize32, Sched<[WriteALULd]>;

-def MOVSX32_NOREXrr8 : I<0, Pseudo,
+def MOVSX32_NOREXrr8 : I<0xBE, MRMSrcReg,
                          (outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
-                         "", [], IIC_MOVSX>, Sched<[WriteALU]>;
+                         "movs{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
+                         [], IIC_MOVSX>, TB, OpSize32, Sched<[WriteALU]>;
 let mayLoad = 1 in
-def MOVSX32_NOREXrm8 : I<0, Pseudo,
+def MOVSX32_NOREXrm8 : I<0xBE, MRMSrcMem,
                          (outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src),
-                         "", [], IIC_MOVSX>, Sched<[WriteALULd]>;
+                         "movs{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
+                         [], IIC_MOVSX>, TB, OpSize32, Sched<[WriteALULd]>;
 }

 // MOVSX64rr8 always has a REX prefix and it has an 8-bit register
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 6638c3bb2ac..0561bcd8d0a 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -7888,27 +7888,6 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
   case X86::VMOVUPSZ256mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
                             get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
-  case X86::MOV8rr_NOREX:
-    MI.setDesc(get(X86::MOV8rr));
-    return true;
-  case X86::MOV8rm_NOREX:
-    MI.setDesc(get(X86::MOV8rm));
-    return true;
-  case X86::MOV8mr_NOREX:
-    MI.setDesc(get(X86::MOV8mr));
-    return true;
-  case X86::MOVZX32_NOREXrr8:
-    MI.setDesc(get(X86::MOVZX32rr8));
-    return true;
-  case X86::MOVZX32_NOREXrm8:
-    MI.setDesc(get(X86::MOVZX32rm8));
-    return true;
-  case X86::MOVSX32_NOREXrr8:
-    MI.setDesc(get(X86::MOVSX32rr8));
-    return true;
-  case X86::MOVSX32_NOREXrm8:
-    MI.setDesc(get(X86::MOVSX32rm8));
-    return true;
   case X86::TEST8ri_NOREX:
     MI.setDesc(get(X86::TEST8ri));
     return true;
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index d42a6e31af4..4800ac99bed 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -1618,20 +1618,23 @@ def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
 // that they can be used for copying and storing h registers, which can't be
 // encoded when a REX prefix is present.
-let isPseudo = 1 in {
+let isCodeGenOnly = 1 in {
 let hasSideEffects = 0 in
-def MOV8rr_NOREX : I<0, Pseudo,
+def MOV8rr_NOREX : I<0x88, MRMDestReg,
                      (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
-                     "", [], IIC_MOV>, Sched<[WriteMove]>;
+                     "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>,
+                     Sched<[WriteMove]>;
 let mayStore = 1, hasSideEffects = 0 in
-def MOV8mr_NOREX : I<0, Pseudo,
+def MOV8mr_NOREX : I<0x88, MRMDestMem,
                      (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
-                     "", [], IIC_MOV_MEM>, Sched<[WriteStore]>;
+                     "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
+                     IIC_MOV_MEM>, Sched<[WriteStore]>;
 let mayLoad = 1, hasSideEffects = 0, canFoldAsLoad = 1,
     isReMaterializable = 1 in
-def MOV8rm_NOREX : I<0, Pseudo,
+def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
                      (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
-                     "", [], IIC_MOV_MEM>, Sched<[WriteLoad]>;
+                     "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
+                     IIC_MOV_MEM>, Sched<[WriteLoad]>;
 }
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index 7a770d6cbc5..36d81128acf 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -604,7 +604,9 @@ ReSimplify:
   // Note, we are currently not handling the following instructions:
   // MOV64ao8, MOV64o8a
   // XCHG16ar, XCHG32ar, XCHG64ar
+  case X86::MOV8mr_NOREX:
   case X86::MOV8mr:
+  case X86::MOV8rm_NOREX:
   case X86::MOV8rm:
   case X86::MOV16mr:
   case X86::MOV16rm:
@@ -613,7 +615,9 @@ ReSimplify:
     unsigned NewOpc;
     switch (OutMI.getOpcode()) {
     default: llvm_unreachable("Invalid opcode");
+    case X86::MOV8mr_NOREX:
    case X86::MOV8mr:  NewOpc = X86::MOV8o32a; break;
+    case X86::MOV8rm_NOREX:
    case X86::MOV8rm:  NewOpc = X86::MOV8ao32; break;
    case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
    case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
index ad5a8be974c..de9b0bbbdc6 100644
--- a/test/CodeGen/X86/bmi.ll
+++ b/test/CodeGen/X86/bmi.ll
@@ -316,7 +316,7 @@ define i32 @bextr32_subreg(i32 %x) uwtable ssp {
 ; CHECK-LABEL: bextr32_subreg:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: movzbl %ah, %eax
+; CHECK-NEXT: movzbl %ah, %eax # NOREX
 ; CHECK-NEXT: retq
   %1 = lshr i32 %x, 8
   %2 = and i32 %1, 255
@@ -374,7 +374,7 @@ define i64 @bextr64_subreg(i64 %x) uwtable ssp {
 ; CHECK-LABEL: bextr64_subreg:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: movzbl %ah, %eax
+; CHECK-NEXT: movzbl %ah, %eax # NOREX
 ; CHECK-NEXT: retq
   %1 = lshr i64 %x, 8
   %2 = and i64 %1, 255
diff --git a/test/CodeGen/X86/bypass-slow-division-32.ll b/test/CodeGen/X86/bypass-slow-division-32.ll
index 9eb39d99962..9f266647d8a 100644
--- a/test/CodeGen/X86/bypass-slow-division-32.ll
+++ b/test/CodeGen/X86/bypass-slow-division-32.ll
@@ -43,7 +43,7 @@ define i32 @Test_get_remainder(i32 %a, i32 %b) nounwind {
 ; CHECK-NEXT: movzbl %al, %eax
 ; CHECK-NEXT: # kill: %EAX %EAX %AX
 ; CHECK-NEXT: divb %cl
-; CHECK-NEXT: movzbl %ah, %eax
+; CHECK-NEXT: movzbl %ah, %eax # NOREX
 ; CHECK-NEXT: retl
   %result = srem i32 %a, %b
   ret i32 %result
@@ -67,7 +67,7 @@ define i32 @Test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind {
 ; CHECK-NEXT: movzbl %al, %eax
 ; CHECK-NEXT: # kill: %EAX %EAX %AX
 ; CHECK-NEXT: divb %cl
-; CHECK-NEXT: movzbl %ah, %edx
+; CHECK-NEXT: movzbl %ah, %edx # NOREX
 ; CHECK-NEXT: movzbl %al, %eax
 ; CHECK-NEXT: addl %edx, %eax
 ; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/divrem.ll b/test/CodeGen/X86/divrem.ll
index 6cd3f919af8..73d16060be7 100644
--- a/test/CodeGen/X86/divrem.ll
+++ b/test/CodeGen/X86/divrem.ll
@@ -122,7 +122,7 @@ define void @si8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT: cbtw
 ; X32-NEXT: idivb {{[0-9]+}}(%esp)
-; X32-NEXT: movsbl %ah, %ebx
+; X32-NEXT: movsbl %ah, %ebx # NOREX
 ; X32-NEXT: movb %al, (%edx)
 ; X32-NEXT: movb %bl, (%ecx)
 ; X32-NEXT: popl %ebx
@@ -133,7 +133,7 @@ define void @si8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: cbtw
 ; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %esi
+; X64-NEXT: movsbl %ah, %esi # NOREX
 ; X64-NEXT: movb %al, (%rdx)
 ; X64-NEXT: movb %sil, (%rcx)
 ; X64-NEXT: retq
@@ -264,7 +264,7 @@ define void @ui8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: # kill: %EAX %EAX %AX
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %ebx
+; X32-NEXT: movzbl %ah, %ebx # NOREX
 ; X32-NEXT: movb %al, (%edx)
 ; X32-NEXT: movb %bl, (%ecx)
 ; X32-NEXT: popl %ebx
@@ -275,7 +275,7 @@ define void @ui8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: # kill: %EAX %EAX %AX
 ; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %esi
+; X64-NEXT: movzbl %ah, %esi # NOREX
 ; X64-NEXT: movb %al, (%rdx)
 ; X64-NEXT: movb %sil, (%rcx)
 ; X64-NEXT: retq
diff --git a/test/CodeGen/X86/divrem8_ext.ll b/test/CodeGen/X86/divrem8_ext.ll
index 931f3eb8dda..7521156a370 100644
--- a/test/CodeGen/X86/divrem8_ext.ll
+++ b/test/CodeGen/X86/divrem8_ext.ll
@@ -8,7 +8,7 @@ define zeroext i8 @test_udivrem_zext_ah(i8 %x, i8 %y) {
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: # kill: %EAX %EAX %AX
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %ecx
+; X32-NEXT: movzbl %ah, %ecx # NOREX
 ; X32-NEXT: movb %al, z
 ; X32-NEXT: movl %ecx, %eax
 ; X32-NEXT: retl
@@ -18,7 +18,7 @@ define zeroext i8 @test_udivrem_zext_ah(i8 %x, i8 %y) {
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: # kill: %EAX %EAX %AX
 ; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %ecx
+; X64-NEXT: movzbl %ah, %ecx # NOREX
 ; X64-NEXT: movb %al, {{.*}}(%rip)
 ; X64-NEXT: movl %ecx, %eax
 ; X64-NEXT: retq
@@ -34,7 +34,7 @@ define zeroext i8 @test_urem_zext_ah(i8 %x, i8 %y) {
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: # kill: %EAX %EAX %AX
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %eax
+; X32-NEXT: movzbl %ah, %eax # NOREX
 ; X32-NEXT: # kill: %AL %AL %EAX
 ; X32-NEXT: retl
 ;
@@ -43,7 +43,7 @@ define zeroext i8 @test_urem_zext_ah(i8 %x, i8 %y) {
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: # kill: %EAX %EAX %AX
 ; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %eax
+; X64-NEXT: movzbl %ah, %eax # NOREX
 ; X64-NEXT: # kill: %AL %AL %EAX
 ; X64-NEXT: retq
   %1 = urem i8 %x, %y
@@ -57,7 +57,7 @@ define i8 @test_urem_noext_ah(i8 %x, i8 %y) {
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: # kill: %EAX %EAX %AX
 ; X32-NEXT: divb %cl
-; X32-NEXT: movzbl %ah, %eax
+; X32-NEXT: movzbl %ah, %eax # NOREX
 ; X32-NEXT: addb %cl, %al
 ; X32-NEXT: # kill: %AL %AL %EAX
 ; X32-NEXT: retl
@@ -67,7 +67,7 @@ define i8 @test_urem_noext_ah(i8 %x, i8 %y) {
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: # kill: %EAX %EAX %AX
 ; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %eax
+; X64-NEXT: movzbl %ah, %eax # NOREX
 ; X64-NEXT: addb %sil, %al
 ; X64-NEXT: # kill: %AL %AL %EAX
 ; X64-NEXT: retq
@@ -82,7 +82,7 @@ define i64 @test_urem_zext64_ah(i8 %x, i8 %y) {
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: # kill: %EAX %EAX %AX
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %eax
+; X32-NEXT: movzbl %ah, %eax # NOREX
 ; X32-NEXT: xorl %edx, %edx
 ; X32-NEXT: retl
 ;
@@ -91,7 +91,7 @@ define i64 @test_urem_zext64_ah(i8 %x, i8 %y) {
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: # kill: %EAX %EAX %AX
 ; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %eax
+; X64-NEXT: movzbl %ah, %eax # NOREX
 ; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: retq
   %1 = urem i8 %x, %y
@@ -105,7 +105,7 @@ define signext i8 @test_sdivrem_sext_ah(i8 %x, i8 %y) {
 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT: cbtw
 ; X32-NEXT: idivb {{[0-9]+}}(%esp)
-; X32-NEXT: movsbl %ah, %ecx
+; X32-NEXT: movsbl %ah, %ecx # NOREX
 ; X32-NEXT: movb %al, z
 ; X32-NEXT: movl %ecx, %eax
 ; X32-NEXT: retl
@@ -115,7 +115,7 @@ define signext i8 @test_sdivrem_sext_ah(i8 %x, i8 %y) {
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: cbtw
 ; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %ecx
+; X64-NEXT: movsbl %ah, %ecx # NOREX
 ; X64-NEXT: movb %al, {{.*}}(%rip)
 ; X64-NEXT: movl %ecx, %eax
 ; X64-NEXT: retq
@@ -131,7 +131,7 @@ define signext i8 @test_srem_sext_ah(i8 %x, i8 %y) {
 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT: cbtw
 ; X32-NEXT: idivb {{[0-9]+}}(%esp)
-; X32-NEXT: movsbl %ah, %eax
+; X32-NEXT: movsbl %ah, %eax # NOREX
 ; X32-NEXT: # kill: %AL %AL %EAX
 ; X32-NEXT: retl
 ;
@@ -140,7 +140,7 @@ define signext i8 @test_srem_sext_ah(i8 %x, i8 %y) {
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: cbtw
 ; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %eax
+; X64-NEXT: movsbl %ah, %eax # NOREX
 ; X64-NEXT: # kill: %AL %AL %EAX
 ; X64-NEXT: retq
   %1 = srem i8 %x, %y
@@ -154,7 +154,7 @@ define i8 @test_srem_noext_ah(i8 %x, i8 %y) {
 ; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
 ; X32-NEXT: cbtw
 ; X32-NEXT: idivb %cl
-; X32-NEXT: movsbl %ah, %eax
+; X32-NEXT: movsbl %ah, %eax # NOREX
 ; X32-NEXT: addb %cl, %al
 ; X32-NEXT: # kill: %AL %AL %EAX
 ; X32-NEXT: retl
@@ -164,7 +164,7 @@ define i8 @test_srem_noext_ah(i8 %x, i8 %y) {
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: cbtw
 ; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %eax
+; X64-NEXT: movsbl %ah, %eax # NOREX
 ; X64-NEXT: addb %sil, %al
 ; X64-NEXT: # kill: %AL %AL %EAX
 ; X64-NEXT: retq
@@ -179,7 +179,7 @@ define i64 @test_srem_sext64_ah(i8 %x, i8 %y) {
 ; X32-NEXT: movb {{[0-9]+}}(%esp), %al
 ; X32-NEXT: cbtw
 ; X32-NEXT: idivb {{[0-9]+}}(%esp)
-; X32-NEXT: movsbl %ah, %eax
+; X32-NEXT: movsbl %ah, %eax # NOREX
 ; X32-NEXT: movl %eax, %edx
 ; X32-NEXT: sarl $31, %edx
 ; X32-NEXT: retl
@@ -189,7 +189,7 @@ define i64 @test_srem_sext64_ah(i8 %x, i8 %y) {
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: cbtw
 ; X64-NEXT: idivb %sil
-; X64-NEXT: movsbl %ah, %eax
+; X64-NEXT: movsbl %ah, %eax # NOREX
 ; X64-NEXT: movsbq %al, %rax
 ; X64-NEXT: retq
   %1 = srem i8 %x, %y
@@ -203,7 +203,7 @@ define i64 @pr25754(i8 %a, i8 %c) {
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: # kill: %EAX %EAX %AX
 ; X32-NEXT: divb {{[0-9]+}}(%esp)
-; X32-NEXT: movzbl %ah, %ecx
+; X32-NEXT: movzbl %ah, %ecx # NOREX
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: addl %ecx, %eax
 ; X32-NEXT: xorl %edx, %edx
@@ -214,7 +214,7 @@ define i64 @pr25754(i8 %a, i8 %c) {
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: # kill: %EAX %EAX %AX
 ; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %ecx
+; X64-NEXT: movzbl %ah, %ecx # NOREX
 ; X64-NEXT: movzbl %cl, %ecx
 ; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: addq %rcx, %rax
diff --git a/test/CodeGen/X86/extract-store.ll b/test/CodeGen/X86/extract-store.ll
index cd5d336a273..e39f3f170a2 100644
--- a/test/CodeGen/X86/extract-store.ll
+++ b/test/CodeGen/X86/extract-store.ll
@@ -114,7 +114,7 @@ define void @extract_i8_15(i8* nocapture %dst, <16 x i8> %foo) nounwind {
 ; SSE2-X64-LABEL: extract_i8_15:
 ; SSE2-X64: # BB#0:
 ; SSE2-X64-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-X64-NEXT: movb %ah, (%rdi)
+; SSE2-X64-NEXT: movb %ah, (%rdi) # NOREX
 ; SSE2-X64-NEXT: retq
 ;
 ; SSE41-X32-LABEL: extract_i8_15:
@@ -142,7 +142,7 @@ define void @extract_i8_15(i8* nocapture %dst, <16 x i8> %foo) nounwind {
 ; SSE-F128-LABEL: extract_i8_15:
 ; SSE-F128: # BB#0:
 ; SSE-F128-NEXT: pextrw $7, %xmm0, %eax
-; SSE-F128-NEXT: movb %ah, (%rdi)
+; SSE-F128-NEXT: movb %ah, (%rdi) # NOREX
 ; SSE-F128-NEXT: retq
   %vecext = extractelement <16 x i8> %foo, i32 15
   store i8 %vecext, i8* %dst, align 1
diff --git a/test/CodeGen/X86/popcnt.ll b/test/CodeGen/X86/popcnt.ll
index fdddb696b2f..b5d4ebba053 100644
--- a/test/CodeGen/X86/popcnt.ll
+++ b/test/CodeGen/X86/popcnt.ll
@@ -101,7 +101,7 @@ define i16 @cnt16(i16 %x) nounwind readnone {
 ; X64-NEXT: movl %eax, %ecx
 ; X64-NEXT: shll $8, %ecx
 ; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: movzbl %ch, %eax
+; X64-NEXT: movzbl %ch, %eax # NOREX
 ; X64-NEXT: # kill: %AX %AX %EAX
 ; X64-NEXT: retq
 ;
diff --git a/test/CodeGen/X86/tbm_patterns.ll b/test/CodeGen/X86/tbm_patterns.ll
index 47b642bacff..a72b5405615 100644
--- a/test/CodeGen/X86/tbm_patterns.ll
+++ b/test/CodeGen/X86/tbm_patterns.ll
@@ -18,7 +18,7 @@ define i32 @test_x86_tbm_bextri_u32_subreg(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_bextri_u32_subreg:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: movzbl %ah, %eax
+; CHECK-NEXT: movzbl %ah, %eax # NOREX
 ; CHECK-NEXT: retq
   %t0 = lshr i32 %a, 8
   %t1 = and i32 %t0, 255
@@ -79,7 +79,7 @@ define i64 @test_x86_tbm_bextri_u64_subreg(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_bextri_u64_subreg:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: movzbl %ah, %eax
+; CHECK-NEXT: movzbl %ah, %eax # NOREX
 ; CHECK-NEXT: retq
   %t0 = lshr i64 %a, 8
   %t1 = and i64 %t0, 255
diff --git a/test/CodeGen/X86/urem-power-of-two.ll b/test/CodeGen/X86/urem-power-of-two.ll
index c92675d844a..72f96776bab 100644
--- a/test/CodeGen/X86/urem-power-of-two.ll
+++ b/test/CodeGen/X86/urem-power-of-two.ll
@@ -83,7 +83,7 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
 ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: # kill: %EAX %EAX %AX
 ; X86-NEXT: divb %cl
-; X86-NEXT: movzbl %ah, %eax
+; X86-NEXT: movzbl %ah, %eax # NOREX
 ; X86-NEXT: # kill: %AL %AL %EAX
 ; X86-NEXT: retl
 ;
@@ -93,7 +93,7 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
 ;
 ; X64-NEXT: movzbl %dil, %eax
 ; X64-NEXT: # kill: %EAX %EAX %AX
 ; X64-NEXT: divb %sil
-; X64-NEXT: movzbl %ah, %eax
+; X64-NEXT: movzbl %ah, %eax # NOREX
 ; X64-NEXT: # kill: %AL %AL %EAX
 ; X64-NEXT: retq
   %and = and i8 %y, 4