%sh = or i64 %sh_lo, %sh_hi
ret i64 %sh
}
+
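+; Masked shift counts: (lo >> (64 - (bits & 63))) | (hi << (bits & 63)).
+; Hardware 64-bit shifts mask the count to 6 bits, so the "64 - amt" operand
+; can be computed with a plain negb, as the CHECK lines below verify.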
+define i64 @test8(i64 %hi, i64 %lo, i64 %bits) nounwind {
+; CHECK-LABEL: test8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: movl %edx, %ecx
+; CHECK-NEXT: andb $63, %cl
+; CHECK-NEXT: negb %cl
+; CHECK-NEXT: shrq %cl, %rsi
+; CHECK-NEXT: movl %edx, %ecx
+; CHECK-NEXT: shlq %cl, %rax
+; CHECK-NEXT: orq %rsi, %rax
+; CHECK-NEXT: retq
+ %tbits = trunc i64 %bits to i8
+ %tand = and i8 %tbits, 63
+ %tand64 = sub i8 64, %tand
+ %and = zext i8 %tand to i64
+ %and64 = zext i8 %tand64 to i64
+ %sh_lo = lshr i64 %lo, %and64
+ %sh_hi = shl i64 %hi, %and
+ %sh = or i64 %sh_lo, %sh_hi
+ ret i64 %sh
+}
ret i32 %sh
}
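+; The i32 version of the masked-count pattern:
+; (lo >> (32 - (bits & 31))) | (hi << (bits & 31)). 32-bit shifts mask the
+; count to 5 bits, so negb stands in for the subtract from 32.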
+define i32 @test18(i32 %hi, i32 %lo, i32 %bits) nounwind {
+; X86-LABEL: test18:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movb {{[0-9]+}}(%esp), %dl
+; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: andb $31, %cl
+; X86-NEXT: negb %cl
+; X86-NEXT: shrl %cl, %esi
+; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: orl %esi, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: test18:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: movl %edx, %ecx
+; X64-NEXT: andb $31, %cl
+; X64-NEXT: negb %cl
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: movl %edx, %ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: orl %esi, %eax
+; X64-NEXT: retq
+ %tbits = trunc i32 %bits to i8
+ %tand = and i8 %tbits, 31
+ %tand32 = sub i8 32, %tand
+ %and = zext i8 %tand to i32
+ %and32 = zext i8 %tand32 to i32
+ %sh_lo = lshr i32 %lo, %and32
+ %sh_hi = shl i32 %hi, %and
+ %sh = or i32 %sh_lo, %sh_hi
+ ret i32 %sh
+}
+
; PR34641 - Masked Shift Counts
define i32 @shld_safe_i32(i32, i32, i32) {