return R;
unsigned BitWidth = I.getType()->getScalarSizeInBits();
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
- unsigned ShAmt = Op1C->getZExtValue();
+ const APInt *ShAmtAPInt;
+ if (match(Op1, m_APInt(ShAmtAPInt))) {
+ unsigned ShAmt = ShAmtAPInt->getZExtValue();
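+ // Unlike ConstantInt, m_APInt also matches vector splat constants, so the
+ // folds below now fire for vectors too.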
// If the shift amount equals the difference in width of the destination
- // and source types:
+ // and source scalar types:
// ashr (shl (zext X), C), C --> sext X
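+ // The shl places the MSB of X at the sign bit of the wider type, and the
+ // ashr replicates it back down, which is exactly sext X.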
Value *X;
if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
ret i64 %B
}
-; FIXME: The ashr should be exact (like it is in the preceding test).
+; The vector ashr should be exact (like it is in the preceding test).
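+; (The ashr only shifts out zeros left behind by the shl, so it is exact.)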
define <2 x i64> @ashr1_vec(<2 x i64> %X) {
; CHECK-LABEL: @ashr1_vec(
; CHECK-NEXT: [[A:%.*]] = shl <2 x i64> %X, <i64 8, i64 8>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i64> [[A]], <i64 2, i64 2>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i64> [[A]], <i64 2, i64 2>
; CHECK-NEXT: ret <2 x i64> [[B]]
;
%A = shl <2 x i64> %X, <i64 8, i64 8>
ret i1 %cmp
}
-; FIXME: Vectors should fold the same way.
+; Vectors should fold the same way.
define <2 x i1> @icmp_sext8trunc_vec(<2 x i32> %x) {
; CHECK-LABEL: @icmp_sext8trunc_vec(
-; CHECK-NEXT: [[SEXT1:%.*]] = shl <2 x i32> %x, <i32 24, i32 24>
-; CHECK-NEXT: [[SEXT:%.*]] = ashr <2 x i32> [[SEXT1]], <i32 24, i32 24>
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i32> [[SEXT]], <i32 36, i32 36>
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> %x to <2 x i8>
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[TMP1]], <i8 36, i8 36>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%trunc = trunc <2 x i32> %x to <2 x i8>
}
+
-; FIXME: Vectors should fold the same way.
+; Vectors should fold the same way.
define <2 x i1> @test_shift_and_cmp_changed1_vec(<2 x i8> %p, <2 x i8> %q) {
; CHECK-LABEL: @test_shift_and_cmp_changed1_vec(
; CHECK-NEXT: [[ANDP:%.*]] = and <2 x i8> %p, <i8 6, i8 6>
; CHECK-NEXT: [[ANDQ:%.*]] = and <2 x i8> %q, <i8 8, i8 8>
; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[ANDQ]], [[ANDP]]
; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i8> [[OR]], <i8 5, i8 5>
-; CHECK-NEXT: [[ASHR:%.*]] = ashr <2 x i8> [[SHL]], <i8 5, i8 5>
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[ASHR]], <i8 1, i8 1>
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[SHL]], <i8 32, i8 32>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
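+; Now that the ashr is exact, the compare folds through it:
+; (ashr exact X, 5) slt 1 --> X slt (1 << 5) == 32.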
%andp = and <2 x i8> %p, <i8 6, i8 6>
; CHECK-LABEL: @trunc_sel_larger_sext_vec(
; CHECK-NEXT: [[TRUNC:%.*]] = zext <2 x i32> %a to <2 x i64>
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i64> [[TRUNC]], <i64 48, i64 48>
-; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i64> [[SEXT]], <i64 48, i64 48>
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 48, i64 48>
; CHECK-NEXT: [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i64> [[TMP1]], <2 x i64> <i64 42, i64 43>
; CHECK-NEXT: ret <2 x i64> [[EXT]]
;
; CHECK-LABEL: @trunc_sel_smaller_sext_vec(
; CHECK-NEXT: [[TRUNC:%.*]] = trunc <2 x i64> %a to <2 x i32>
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i32> [[TRUNC]], <i32 16, i32 16>
-; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i32> [[SEXT]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i32> [[SEXT]], <i32 16, i32 16>
; CHECK-NEXT: [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
; CHECK-NEXT: ret <2 x i32> [[EXT]]
;
define <2 x i32> @trunc_sel_equal_sext_vec(<2 x i32> %a, <2 x i1> %cmp) {
; CHECK-LABEL: @trunc_sel_equal_sext_vec(
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i32> %a, <i32 16, i32 16>
-; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i32> [[SEXT]], <i32 16, i32 16>
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i32> [[SEXT]], <i32 16, i32 16>
; CHECK-NEXT: [[EXT:%.*]] = select <2 x i1> %cmp, <2 x i32> [[TMP1]], <2 x i32> <i32 42, i32 43>
; CHECK-NEXT: ret <2 x i32> [[EXT]]
;
; CHECK-LABEL: @test_63(
; CHECK-NEXT: [[A:%.*]] = zext <2 x i64> %t to <2 x i65>
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i65> [[A]], <i65 33, i65 33>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i65> [[SEXT]], <i65 33, i65 33>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i65> [[SEXT]], <i65 33, i65 33>
; CHECK-NEXT: ret <2 x i65> [[B]]
;
%a = zext <2 x i64> %t to <2 x i65>
ret i32 %tmp.5
}
-; FIXME: Vectors should get the same fold as above.
+; Vectors should get the same fold as above.
define <2 x i32> @test6_splat_vec(<2 x i12> %P) {
; CHECK-LABEL: @test6_splat_vec(
-; CHECK-NEXT: [[Z:%.*]] = zext <2 x i12> %P to <2 x i32>
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw <2 x i32> [[Z]], <i32 20, i32 20>
-; CHECK-NEXT: [[ASHR:%.*]] = ashr <2 x i32> [[SHL]], <i32 20, i32 20>
+; CHECK-NEXT: [[ASHR:%.*]] = sext <2 x i12> %P to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[ASHR]]
;
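+; This is the new fold from the patch: ashr (shl (zext X), 20), 20 --> sext X,
+; with 20 == 32 - 12, the difference in width of the scalar types.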
%z = zext <2 x i12> %P to <2 x i32>
ret <2 x i64> %b
}
-define <2 x i65> @foos(<2 x i64> %t) {
-; CHECK-LABEL: @foos(
-; CHECK-NEXT: [[A:%.*]] = zext <2 x i64> %t to <2 x i65>
-; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i65> [[A]], <i65 33, i65 33>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i65> [[SEXT]], <i65 33, i65 33>
-; CHECK-NEXT: ret <2 x i65> [[B]]
-;
- %a = trunc <2 x i64> %t to <2 x i32>
- %b = sext <2 x i32> %a to <2 x i65>
- ret <2 x i65> %b
-}
-
define <2 x i64> @bars(<2 x i65> %t) {
; CHECK-LABEL: @bars(
; CHECK-NEXT: [[A:%.*]] = trunc <2 x i65> %t to <2 x i64>
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i64> [[A]], <i64 32, i64 32>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i64> [[SEXT]], <i64 32, i64 32>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 32, i64 32>
; CHECK-NEXT: ret <2 x i64> [[B]]
;
%a = trunc <2 x i65> %t to <2 x i32>
define <2 x i64> @quxs(<2 x i64> %t) {
; CHECK-LABEL: @quxs(
; CHECK-NEXT: [[SEXT:%.*]] = shl <2 x i64> %t, <i64 32, i64 32>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i64> [[SEXT]], <i64 32, i64 32>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i64> [[SEXT]], <i64 32, i64 32>
; CHECK-NEXT: ret <2 x i64> [[B]]
;
%a = trunc <2 x i64> %t to <2 x i32>
define <2 x i64> @quxt(<2 x i64> %t) {
; CHECK-LABEL: @quxt(
; CHECK-NEXT: [[A:%.*]] = shl <2 x i64> %t, <i64 32, i64 32>
-; CHECK-NEXT: [[B:%.*]] = ashr <2 x i64> [[A]], <i64 32, i64 32>
+; CHECK-NEXT: [[B:%.*]] = ashr exact <2 x i64> [[A]], <i64 32, i64 32>
; CHECK-NEXT: ret <2 x i64> [[B]]
;
%a = shl <2 x i64> %t, <i64 32, i64 32>