; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64

; The easy case: a constant power-of-2 divisor.
define i64 @const_pow_2(i64 %x) {
; X86-LABEL: const_pow_2:
; X86: # BB#0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: andl $31, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: const_pow_2:
; X64: # BB#0:
; X64-NEXT: andl $31, %edi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
  %urem = urem i64 %x, 32
  ret i64 %urem
}
; A left-shifted power-of-2 divisor. Use a weird type for wider coverage.
define i25 @shift_left_pow_2(i25 %x, i25 %y) {
; X86-LABEL: shift_left_pow_2:
; X86: # BB#0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl $1, %eax
; X86-NEXT: shll %cl, %eax
; X86-NEXT: addl $33554431, %eax # imm = 0x1FFFFFF
; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-LABEL: shift_left_pow_2:
; X64: # BB#0:
; X64-NEXT: movl $1, %eax
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: addl $33554431, %eax # imm = 0x1FFFFFF
; X64-NEXT: andl %edi, %eax
; X64-NEXT: retq
  %shl = shl i25 1, %y
  %urem = urem i25 %x, %shl
  ret i25 %urem
}
; A logically right-shifted sign bit is a power-of-2 or UB.
define i16 @shift_right_pow_2(i16 %x, i16 %y) {
; X86-LABEL: shift_right_pow_2:
; X86: # BB#0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl $32768, %eax # imm = 0x8000
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: decl %eax
; X86-NEXT: andw {{[0-9]+}}(%esp), %ax
; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; X86-NEXT: retl
;
; X64-LABEL: shift_right_pow_2:
; X64: # BB#0:
; X64-NEXT: movl $32768, %eax # imm = 0x8000
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: decl %eax
; X64-NEXT: andl %edi, %eax
; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; X64-NEXT: retq
  %shr = lshr i16 -32768, %y
  %urem = urem i16 %x, %shr
  ret i16 %urem
}
; FIXME: A zero divisor would be UB, so this could be reduced to an 'and' with 3.
define i8 @and_pow_2(i8 %x, i8 %y) {
; X86-LABEL: and_pow_2:
; X86: # BB#0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: andb $4, %cl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X86-NEXT: divb %cl
; X86-NEXT: movzbl %ah, %eax # NOREX
; X86-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X86-NEXT: retl
;
; X64-LABEL: and_pow_2:
; X64: # BB#0:
; X64-NEXT: andb $4, %sil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; X64-NEXT: retq
  %and = and i8 %y, 4
  %urem = urem i8 %x, %and
  ret i8 %urem
}
; A vector splat constant divisor should get the same treatment as a scalar.
define <4 x i32> @vec_const_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_pow_2:
; X86: # BB#0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_pow_2:
; X64: # BB#0:
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
  %urem = urem <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %urem
}