case ISD::VECTOR_SHUFFLE:
SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
break;
+ case ISD::VAARG:
+ SplitVecRes_VAARG(N, Lo, Hi);
+ break;
case ISD::ANY_EXTEND_VECTOR_INREG:
case ISD::SIGN_EXTEND_VECTOR_INREG:
}
}
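+// Split the result of a VAARG node whose vector result type is illegal into
+// two VAARG nodes of the half-sized vector type, chained back to back.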
+void DAGTypeLegalizer::SplitVecRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
+ EVT OVT = N->getValueType(0);
+ EVT NVT = OVT.getHalfNumVectorElementsVT(*DAG.getContext());
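+ // e.g. for OVT = v8i32 on a target with legal 128-bit vectors, NVT = v4i32.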
+ SDValue Chain = N->getOperand(0);
+ SDValue Ptr = N->getOperand(1);
+ SDValue SV = N->getOperand(2);
+ SDLoc dl(N);
+
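+ // Each half is fetched with the ABI alignment of the half-sized type.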
+ const unsigned Alignment = DAG.getDataLayout().getABITypeAlignment(
+ NVT.getTypeForEVT(*DAG.getContext()));
+
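+ // Chain Hi on Lo's output chain so the second fetch reads the next
+ // argument after the first.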
+ Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, SV, Alignment);
+ Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, SV, Alignment);
+ Chain = Hi.getValue(1);
+
+ // Modified the chain - switch anything that used the old chain to use
+ // the new one.
+ ReplaceValueWith(SDValue(N, 1), Chain);
+}
+
//===----------------------------------------------------------------------===//
// Operand Vector Splitting
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec | FileCheck %s -check-prefix=BE
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s -check-prefix=LE
+
+define <8 x i32> @test_large_vec_vaarg(i32 %n, ...) {
+; BE-LABEL: test_large_vec_vaarg:
+; BE: # %bb.0:
+; BE-NEXT: std 4, 56(1)
+; BE-NEXT: std 5, 64(1)
+; BE-NEXT: std 6, 72(1)
+; BE-NEXT: std 7, 80(1)
+; BE-NEXT: std 8, 88(1)
+; BE-NEXT: std 9, 96(1)
+; BE-NEXT: std 10, 104(1)
+; BE-NEXT: ld 3, -8(1)
+; BE-NEXT: addi 3, 3, 15
+; BE-NEXT: rldicr 3, 3, 0, 59
+; BE-NEXT: addi 4, 3, 16
+; BE-NEXT: addi 5, 3, 31
+; BE-NEXT: std 4, -8(1)
+; BE-NEXT: rldicr 4, 5, 0, 59
+; BE-NEXT: lvx 2, 0, 3
+; BE-NEXT: addi 3, 4, 16
+; BE-NEXT: std 3, -8(1)
+; BE-NEXT: lvx 3, 0, 4
+; BE-NEXT: blr
+;
+; LE-LABEL: test_large_vec_vaarg:
+; LE: # %bb.0:
+; LE-NEXT: std 4, 40(1)
+; LE-NEXT: std 5, 48(1)
+; LE-NEXT: std 6, 56(1)
+; LE-NEXT: std 7, 64(1)
+; LE-NEXT: std 8, 72(1)
+; LE-NEXT: std 9, 80(1)
+; LE-NEXT: std 10, 88(1)
+; LE-NEXT: ld 3, -8(1)
+; LE-NEXT: addi 3, 3, 15
+; LE-NEXT: rldicr 3, 3, 0, 59
+; LE-NEXT: addi 4, 3, 31
+; LE-NEXT: addi 5, 3, 16
+; LE-NEXT: rldicr 4, 4, 0, 59
+; LE-NEXT: std 5, -8(1)
+; LE-NEXT: addi 5, 4, 16
+; LE-NEXT: lvx 2, 0, 3
+; LE-NEXT: std 5, -8(1)
+; LE-NEXT: lvx 3, 0, 4
+; LE-NEXT: blr
+ %args = alloca i8*, align 4
+ %x = va_arg i8** %args, <8 x i32>
+ ret <8 x i32> %x
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s
+
+define <32 x i32> @test_large_vec_vaarg(i32 %n, ...) {
+; CHECK-LABEL: test_large_vec_vaarg:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT: cmpl $24, %ecx
+; CHECK-NEXT: jae .LBB0_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: addl $8, %ecx
+; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jmp .LBB0_3
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: movq (%rsp), %rax
+; CHECK-NEXT: addq $31, %rax
+; CHECK-NEXT: andq $-32, %rax
+; CHECK-NEXT: leaq 32(%rax), %rcx
+; CHECK-NEXT: movq %rcx, (%rsp)
+; CHECK-NEXT: .LBB0_3:
+; CHECK-NEXT: vmovaps (%rax), %ymm0
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT: cmpl $24, %ecx
+; CHECK-NEXT: jae .LBB0_5
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: addl $8, %ecx
+; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jmp .LBB0_6
+; CHECK-NEXT: .LBB0_5:
+; CHECK-NEXT: movq (%rsp), %rax
+; CHECK-NEXT: addq $31, %rax
+; CHECK-NEXT: andq $-32, %rax
+; CHECK-NEXT: leaq 32(%rax), %rcx
+; CHECK-NEXT: movq %rcx, (%rsp)
+; CHECK-NEXT: .LBB0_6:
+; CHECK-NEXT: vmovaps (%rax), %ymm1
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT: cmpl $24, %ecx
+; CHECK-NEXT: jae .LBB0_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: addl $8, %ecx
+; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jmp .LBB0_9
+; CHECK-NEXT: .LBB0_8:
+; CHECK-NEXT: movq (%rsp), %rax
+; CHECK-NEXT: addq $31, %rax
+; CHECK-NEXT: andq $-32, %rax
+; CHECK-NEXT: leaq 32(%rax), %rcx
+; CHECK-NEXT: movq %rcx, (%rsp)
+; CHECK-NEXT: .LBB0_9:
+; CHECK-NEXT: vmovaps (%rax), %ymm2
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT: cmpl $24, %ecx
+; CHECK-NEXT: jae .LBB0_11
+; CHECK-NEXT: # %bb.10:
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: addl $8, %ecx
+; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: vmovaps (%rax), %ymm3
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_11:
+; CHECK-NEXT: movq (%rsp), %rax
+; CHECK-NEXT: addq $31, %rax
+; CHECK-NEXT: andq $-32, %rax
+; CHECK-NEXT: leaq 32(%rax), %rcx
+; CHECK-NEXT: movq %rcx, (%rsp)
+; CHECK-NEXT: vmovaps (%rax), %ymm3
+; CHECK-NEXT: retq
+ %args = alloca i8*, align 4
+ %x = va_arg i8** %args, <32 x i32>
+ ret <32 x i32> %x
+}