From: Simon Pilgrim
Date: Mon, 14 Jan 2019 12:12:42 +0000 (+0000)
Subject: [DAGCombiner] Add add saturation constant folding tests.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=fc46ecd5d4c4ef7073e3d090e4e0916ff6a7456e;p=llvm

[DAGCombiner] Add add saturation constant folding tests.

Exposes an issue with sadd_sat for computeOverflowKind, so I've disabled
it for now. (See the worked sketches appended after the patch.)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@351057 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 6d828d3972d..580a668100b 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2207,8 +2207,9 @@ SDValue DAGCombiner::visitADDSAT(SDNode *N) {
     return N0;
 
   // If it cannot overflow, transform into an add.
-  if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
-    return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
+  if (Opcode == ISD::UADDSAT)
+    if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
+      return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
 
   return SDValue();
 }
diff --git a/test/CodeGen/X86/combine-add-ssat.ll b/test/CodeGen/X86/combine-add-ssat.ll
index 217e91a62a4..63fde10a325 100644
--- a/test/CodeGen/X86/combine-add-ssat.ll
+++ b/test/CodeGen/X86/combine-add-ssat.ll
@@ -11,6 +11,39 @@ declare i32 @llvm.sadd.sat.i32 (i32, i32)
 declare i64 @llvm.sadd.sat.i64 (i64, i64)
 declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
 
+; fold (sadd_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    xorl %ecx, %ecx
+; CHECK-NEXT:    movl $2147483647, %edx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    addl $100, %edx
+; CHECK-NEXT:    setns %cl
+; CHECK-NEXT:    addl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    addl $100, %eax
+; CHECK-NEXT:    cmovol %ecx, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.sadd.sat.i32(i32 2147483647, i32 100)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; SSE-NEXT:    paddsw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; AVX-NEXT:    vpaddsw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> , <8 x i16> )
+  ret <8 x i16> %res
+}
+
 ; fold (sadd_sat c, x) -> (sadd_sat x, c)
 define i32 @combine_constant_i32(i32 %a0) {
 ; CHECK-LABEL: combine_constant_i32:
@@ -23,8 +56,8 @@ define i32 @combine_constant_i32(i32 %a0) {
 ; CHECK-NEXT:    incl %edi
 ; CHECK-NEXT:    cmovnol %edi, %eax
 ; CHECK-NEXT:    retq
-  %res = call i32 @llvm.sadd.sat.i32(i32 1, i32 %a0);
-  ret i32 %res;
+  %res = call i32 @llvm.sadd.sat.i32(i32 1, i32 %a0)
+  ret i32 %res
 }
 
 define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
@@ -37,8 +70,8 @@ define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddsw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
-  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> , <8 x i16> %a0);
-  ret <8 x i16> %res;
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> , <8 x i16> %a0)
+  ret <8 x i16> %res
 }
 
 ; fold (sadd_sat c, 0) -> x
@@ -47,7 +80,7 @@ define i32 @combine_zero_i32(i32 %a0) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
-  %1 = call i32 @llvm.sadd.sat.i32(i32 %a0, i32 0);
+  %1 = call i32 @llvm.sadd.sat.i32(i32 %a0, i32 0)
   ret i32 %1
 }
 
@@ -55,7 +88,7 @@ define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
 ; CHECK-LABEL: combine_zero_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
-  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer);
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
   ret <8 x i16> %1
 }
 
@@ -75,7 +108,7 @@ define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
 ; CHECK-NEXT:    retq
   %1 = ashr i32 %a0, 16
   %2 = lshr i32 %a1, 16
-  %3 = call i32 @llvm.sadd.sat.i32(i32 %1, i32 %2);
+  %3 = call i32 @llvm.sadd.sat.i32(i32 %1, i32 %2)
   ret i32 %3
 }
 
@@ -95,6 +128,6 @@ define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
 ; AVX-NEXT:    retq
   %1 = ashr <8 x i16> %a0,
   %2 = lshr <8 x i16> %a1,
-  %3 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2);
+  %3 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
   ret <8 x i16> %3
 }
diff --git a/test/CodeGen/X86/combine-add-usat.ll b/test/CodeGen/X86/combine-add-usat.ll
index 26fe088f765..d7ce71ef28e 100644
--- a/test/CodeGen/X86/combine-add-usat.ll
+++ b/test/CodeGen/X86/combine-add-usat.ll
@@ -11,6 +11,35 @@ declare i32 @llvm.uadd.sat.i32 (i32, i32)
 declare i64 @llvm.uadd.sat.i64 (i64, i64)
 declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
 
+; fold (uadd_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $-1, %ecx
+; CHECK-NEXT:    movl $-1, %eax
+; CHECK-NEXT:    addl $100, %eax
+; CHECK-NEXT:    cmovbl %ecx, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.uadd.sat.i32(i32 4294967295, i32 100)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> , <8 x i16> )
+  ret <8 x i16> %res
+}
+
 ; fold (uadd_sat c, x) -> (add_ssat x, c)
 define i32 @combine_constant_i32(i32 %a0) {
 ; CHECK-LABEL: combine_constant_i32:
@@ -19,7 +48,7 @@ define i32 @combine_constant_i32(i32 %a0) {
 ; CHECK-NEXT:    movl $-1, %eax
 ; CHECK-NEXT:    cmovael %edi, %eax
 ; CHECK-NEXT:    retq
-  %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0);
+  %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0)
   ret i32 %1
 }
 
@@ -33,7 +62,7 @@ define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
-  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> , <8 x i16> %a0);
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> , <8 x i16> %a0)
   ret <8 x i16> %1
 }
 ; fold (uadd_sat c, 0) -> x
@@ -43,7 +72,7 @@ define i32 @combine_zero_i32(i32 %a0) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
-  %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0);
+  %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0)
   ret i32 %1
 }
 
@@ -51,7 +80,7 @@ define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
 ; CHECK-LABEL: combine_zero_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
-  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer);
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
   ret <8 x i16> %1
 }
 
@@ -67,7 +96,7 @@ define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
 ; CHECK-NEXT:    retq
   %1 = lshr i32 %a0, 16
   %2 = lshr i32 %a1, 16
-  %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2);
+  %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2)
   ret i32 %3
 }
 
@@ -87,6 +116,6 @@ define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
 ; AVX-NEXT:    retq
   %1 = lshr <8 x i16> %a0,
   %2 = lshr <8 x i16> %a1,
-  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2);
+  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
   ret <8 x i16> %3
 }
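
Why the sadd_sat case is disabled above: the likely hazard is that an
overflow analysis geared to unsigned carries can answer OFK_Never for an
addition that still overflows in the signed sense, and folding sadd_sat
into a plain add on that basis would silently drop the saturation. The
following minimal, self-contained C++ sketch shows the mismatch; it is
illustrative only, not code from this patch — sadd_sat_i32 is a
hypothetical stand-in for the ISD::SADDSAT semantics, and the constants
mirror the combine_constfold_i32 test above.

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // Stand-in for ISD::SADDSAT on i32: clamp instead of wrapping.
    // (Illustrative helper, not an LLVM API.)
    static int32_t sadd_sat_i32(int32_t a, int32_t b) {
      int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
      if (wide > std::numeric_limits<int32_t>::max())
        return std::numeric_limits<int32_t>::max();
      if (wide < std::numeric_limits<int32_t>::min())
        return std::numeric_limits<int32_t>::min();
      return static_cast<int32_t>(wide);
    }

    int main() {
      // Same constants as combine_constfold_i32: INT32_MAX + 100.
      // As an unsigned 32-bit add this does not carry out
      // (0x7FFFFFFF + 100 < 2^32), so a purely unsigned "never
      // overflows" verdict would be wrong to act on here: the signed
      // add overflows and sadd_sat must saturate to INT32_MAX.
      int32_t a = std::numeric_limits<int32_t>::max();
      int32_t b = 100;
      uint32_t wrapped = static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
      printf("plain add (wrapped): %d\n", static_cast<int32_t>(wrapped)); // -2147483549
      printf("sadd_sat:            %d\n", sadd_sat_i32(a, b));            // 2147483647
      return 0;
    }

The setns/cmovol sequence in the combine_constfold_i32 CHECK lines
computes exactly this saturation at run time; once constant folding of
sadd_sat lands, that whole sequence should collapse to a single constant.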
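
For contrast, the uadd_sat half of the transform stays enabled in the
DAGCombiner change above: an unsigned saturating add agrees with a plain
add whenever the carry provably cannot occur, which is the situation the
combine_no_overflow tests construct by pre-shifting both operands right
by 16. A sketch under the same caveats (uadd_sat_u32 is an illustrative
stand-in, not an LLVM API):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for ISD::UADDSAT on i32: clamp to UINT32_MAX on carry.
    static uint32_t uadd_sat_u32(uint32_t a, uint32_t b) {
      uint32_t sum = a + b;               // unsigned add wraps on carry-out
      return sum < a ? UINT32_MAX : sum;  // wrapped result => saturate
    }

    int main() {
      // Mirrors combine_no_overflow_i32: after lshr by 16 both operands
      // fit in 16 bits, so their sum is at most 0x1FFFE and can never
      // carry out of 32 bits -- uadd_sat(a, b) equals a + b, which is
      // what justifies lowering UADDSAT to a plain ADD on OFK_Never.
      uint32_t a = 0xDEADBEEFu >> 16;
      uint32_t b = 0xCAFEF00Du >> 16;
      printf("uadd_sat:  %u\n", uadd_sat_u32(a, b));
      printf("plain add: %u\n", a + b); // identical here
      return 0;
    }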