// Scale the leading zero count down based on the actual size of the value.
// Also scale it down based on the size of the shift.
- MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
+ unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
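+ // If the mask has fewer leading zeros than we are about to scale away, the
+ // unsigned subtraction below would wrap; bail out of the transform instead.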
+ if (MaskLZ < ScaleDown)
+   return true;
+ MaskLZ -= ScaleDown;
// The final check is to ensure that any masked out high bits of X are
// already known to be zero. Otherwise, the mask has a semantic impact
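For concreteness, here is a minimal standalone sketch of the arithmetic the guard protects (a hypothetical function whose names mirror the diff; it is not the LLVM helper itself). With the values the test below produces, the mask 62 has a 64-bit leading-zero count of 58, while the scale-down amount for a 32-bit value shifted right by 31 is (64 - 32) + 31 = 63, so the old unconditional subtraction wrapped around:

    #include <cassert>

    // Hypothetical model of the patched logic above; returns true when the
    // fold has to be abandoned, as in the diff.
    static bool scaleMaskLZ(unsigned &MaskLZ, unsigned ValueBits,
                            unsigned ShiftAmt) {
      unsigned ScaleDown = (64 - ValueBits) + ShiftAmt;
      if (MaskLZ < ScaleDown)
        return true; // 58u - 63u would wrap to a huge unsigned value
      MaskLZ -= ScaleDown;
      return false;
    }

    int main() {
      // Mask 62 = 0b111110 has 58 leading zeros when widened to 64 bits.
      unsigned MaskLZ = 58;
      // A 32-bit value shifted right by 31, as in the test below:
      // ScaleDown = (64 - 32) + 31 = 63 > 58, so the guard must fire.
      assert(scaleMaskLZ(MaskLZ, /*ValueBits=*/32, /*ShiftAmt=*/31));
    }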
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -o - %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@global = external global i32
+@global.1 = external global i64
+
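+; Regression test: the shift-by-31 feeding the mask of 62 below gives the
+; mask a 64-bit leading-zero count (58) smaller than the scale-down amount
+; ((64 - 32) + 31 = 63), which used to underflow MaskLZ in the code above.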
+define void @patatino() {
+; CHECK-LABEL: patatino:
+; CHECK: # BB#0: # %bb
+; CHECK-NEXT: movl {{.*}}(%rip), %eax
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shrl $31, %ecx
+; CHECK-NEXT: addl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-NEXT: shrl $31, %ecx
+; CHECK-NEXT: andl $62, %ecx
+; CHECK-NEXT: andl $-536870912, %eax # imm = 0xE0000000
+; CHECK-NEXT: orl %ecx, %eax
+; CHECK-NEXT: movl %eax, {{.*}}(%rip)
+; CHECK-NEXT: retq
+bb:
+ %tmp = load i32, i32* @global
+ %tmp1 = lshr i32 %tmp, 31
+ %tmp2 = add nuw nsw i32 %tmp1, 2147483647
+ %tmp3 = load i64, i64* @global.1
+ %tmp4 = shl i64 %tmp3, 23
+ %tmp5 = add nsw i64 %tmp4, 8388639
+ %tmp6 = trunc i64 %tmp5 to i32
+ %tmp7 = lshr i32 %tmp2, %tmp6
+ %tmp8 = load i32, i32* @global
+ %tmp9 = and i32 %tmp7, 62
+ %tmp10 = and i32 %tmp8, -536870912
+ %tmp11 = or i32 %tmp9, %tmp10
+ store i32 %tmp11, i32* @global
+ ret void
+}