From: Amaury Sechet
Date: Sat, 2 Mar 2019 02:24:36 +0000 (+0000)
Subject: Add test case for truncate funnel shifts. NFC
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=76b7cba4c016223970af88c70cd27cf3ab29db9f;p=llvm

Add test case for truncate funnel shifts. NFC

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@355258 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/shift-double-x86_64.ll b/test/CodeGen/X86/shift-double-x86_64.ll
index da5d10c6ee5..22993cd09df 100644
--- a/test/CodeGen/X86/shift-double-x86_64.ll
+++ b/test/CodeGen/X86/shift-double-x86_64.ll
@@ -114,3 +114,26 @@ define i64 @test7(i64 %hi, i64 %lo, i64 %bits) nounwind {
   %sh = or i64 %sh_lo, %sh_hi
   ret i64 %sh
 }
+
+define i64 @test8(i64 %hi, i64 %lo, i64 %bits) nounwind {
+; CHECK-LABEL: test8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    movl %edx, %ecx
+; CHECK-NEXT:    andb $63, %cl
+; CHECK-NEXT:    negb %cl
+; CHECK-NEXT:    shrq %cl, %rsi
+; CHECK-NEXT:    movl %edx, %ecx
+; CHECK-NEXT:    shlq %cl, %rax
+; CHECK-NEXT:    orq %rsi, %rax
+; CHECK-NEXT:    retq
+  %tbits = trunc i64 %bits to i8
+  %tand = and i8 %tbits, 63
+  %tand64 = sub i8 64, %tand
+  %and = zext i8 %tand to i64
+  %and64 = zext i8 %tand64 to i64
+  %sh_lo = lshr i64 %lo, %and64
+  %sh_hi = shl i64 %hi, %and
+  %sh = or i64 %sh_lo, %sh_hi
+  ret i64 %sh
+}
diff --git a/test/CodeGen/X86/shift-double.ll b/test/CodeGen/X86/shift-double.ll
index 5e8efa7b9f0..70d323957d2 100644
--- a/test/CodeGen/X86/shift-double.ll
+++ b/test/CodeGen/X86/shift-double.ll
@@ -459,6 +459,45 @@ define i32 @test17(i32 %hi, i32 %lo, i32 %bits) nounwind {
   ret i32 %sh
 }
 
+define i32 @test18(i32 %hi, i32 %lo, i32 %bits) nounwind {
+; X86-LABEL: test18:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    andb $31, %cl
+; X86-NEXT:    negb %cl
+; X86-NEXT:    shrl %cl, %esi
+; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    shll %cl, %eax
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: test18:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    andb $31, %cl
+; X64-NEXT:    negb %cl
+; X64-NEXT:    shrl %cl, %esi
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    shll %cl, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    retq
+  %tbits = trunc i32 %bits to i8
+  %tand = and i8 %tbits, 31
+  %tand64 = sub i8 32, %tand
+  %and = zext i8 %tand to i32
+  %and64 = zext i8 %tand64 to i32
+  %sh_lo = lshr i32 %lo, %and64
+  %sh_hi = shl i32 %hi, %and
+  %sh = or i32 %sh_lo, %sh_hi
+  ret i32 %sh
+}
+
 ; PR34641 - Masked Shift Counts
 
 define i32 @shld_safe_i32(i32, i32, i32) {
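
For reference, the new tests exercise the classic double-shift idiom that funnel shifts are built from: the shift amount is truncated to i8 and masked to the bit width, and the low word is shifted right by the complement of that amount. A minimal C sketch of what test8's IR computes follows; the function and variable names are illustrative only (not from the patch), and the n == 0 guard is added here because the IR's lshr by the full bit width would yield poison in that case.

#include <stdint.h>

/* Funnel-shift-left of the (hi, lo) pair by `bits`, mirroring test8's IR. */
uint64_t funnel_shl64(uint64_t hi, uint64_t lo, uint64_t bits) {
    uint8_t n = (uint8_t)bits & 63;   /* trunc i64 -> i8, then and i8 63 */
    uint8_t m = (uint8_t)(64 - n);    /* sub i8 64, n: the complement amount */
    /* Guard n == 0: in the IR, lshr i64 %lo, 64 is poison, and in C a
       64-bit shift by 64 is undefined behavior. */
    return n ? (hi << n) | (lo >> m) : hi;
}

test18 is the same pattern at 32 bits (mask 31, complement 32 - n), checked for both the 32-bit and 64-bit X86 targets.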