From: Matt Arsenault
Date: Tue, 21 May 2019 23:23:10 +0000 (+0000)
Subject: AMDGPU: Assume call pseudos are convergent
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=972131f95cbdecca06302766a52f0f8b496ee393;p=llvm

AMDGPU: Assume call pseudos are convergent

There should probably be nonconvergent versions, but my guess is it
doesn't matter in practice.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@361331 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td
index 94c91ad7f2f..74302693239 100644
--- a/lib/Target/AMDGPU/SIInstructions.td
+++ b/lib/Target/AMDGPU/SIInstructions.td
@@ -383,6 +383,8 @@ def SI_CALL_ISEL : SPseudoInstSI <
   let isCall = 1;
   let SchedRW = [WriteBranch];
   let usesCustomInserter = 1;
+  // TODO: Should really base this on the call target
+  let isConvergent = 1;
 }
 
 // Wrapper around s_swappc_b64 with extra $callee parameter to track
@@ -393,6 +395,8 @@ def SI_CALL : SPseudoInstSI <
   let isCall = 1;
   let UseNamedOperandTable = 1;
   let SchedRW = [WriteBranch];
+  // TODO: Should really base this on the call target
+  let isConvergent = 1;
 }
 
 // Tail call handling pseudo
@@ -406,6 +410,8 @@ def SI_TCRETURN : SPseudoInstSI <(outs),
   let isBarrier = 1;
   let UseNamedOperandTable = 1;
   let SchedRW = [WriteBranch];
+  // TODO: Should really base this on the call target
+  let isConvergent = 1;
 }
 
diff --git a/test/CodeGen/AMDGPU/tail-duplication-convergent.ll b/test/CodeGen/AMDGPU/tail-duplication-convergent.ll
new file mode 100644
index 00000000000..70790311480
--- /dev/null
+++ b/test/CodeGen/AMDGPU/tail-duplication-convergent.ll
@@ -0,0 +1,105 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O2 -tail-dup-size=1000 -tail-dup-placement-threshold=1000 -enable-tail-merge=0 < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+
+; Need to trigger tail duplication during MachineBlockPlacement,
+; since calls aren't tail duplicated pre-RA.
+
+declare void @nonconvergent_func() #0
+declare void @convergent_func() #1
+declare void @llvm.amdgcn.s.barrier() #1
+
+; The barrier shouldn't be duplicated.
+
+; GCN-LABEL: {{^}}taildup_barrier:
+; GCN: s_barrier
+; GCN-NOT: s_barrier
+define void @taildup_barrier(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #0 {
+entry:
+  br i1 %cond, label %bb1, label %bb2
+
+bb1:
+  store i32 0, i32 addrspace(1)* %a
+  br label %call
+
+bb2:
+  store i32 1, i32 addrspace(1)* %a
+  br label %call
+
+call:
+  call void @llvm.amdgcn.s.barrier()
+  br label %ret
+
+ret:
+  ret void
+}
+
+; GCN-LABEL: {{^}}taildup_convergent_call:
+; GCN: s_swappc_b64
+; GCN-NOT: s_swappc_b64
+define void @taildup_convergent_call(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #1 {
+entry:
+  br i1 %cond, label %bb1, label %bb2
+
+bb1:
+  store i32 0, i32 addrspace(1)* %a
+  br label %call
+
+bb2:
+  store i32 1, i32 addrspace(1)* %a
+  br label %call
+
+call:
+  call void @convergent_func()
+  br label %ret
+
+ret:
+  ret void
+}
+
+; TODO: Currently there is only one convergent call pseudo, but this
+; theoretically could use a nonconvergent variant.
+
+; GCN-LABEL: {{^}}taildup_nonconvergent_call:
+; GCN: s_swappc_b64
+; GCN-NOT: s_swappc_b64
+define void @taildup_nonconvergent_call(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #1 {
+entry:
+  br i1 %cond, label %bb1, label %bb2
+
+bb1:
+  store i32 0, i32 addrspace(1)* %a
+  br label %call
+
+bb2:
+  store i32 1, i32 addrspace(1)* %a
+  br label %call
+
+call:
+  call void @nonconvergent_func()
+  br label %ret
+
+ret:
+  ret void
+}
+
+; GCN-LABEL: {{^}}taildup_convergent_tailcall:
+; GCN: s_setpc_b64
+; GCN-NOT: s_setpc_b64
+define void @taildup_convergent_tailcall(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #1 {
+entry:
+  br i1 %cond, label %bb1, label %bb2
+
+bb1:
+  store i32 0, i32 addrspace(1)* %a
+  br label %call
+
+bb2:
+  store i32 1, i32 addrspace(1)* %a
+  br label %call
+
+call:
+  tail call void @convergent_func()
+  ret void
+}
+
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind convergent }
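As a hypothetical sketch of the TODO (not part of this commit): the nonconvergent
version could be a second pseudo that leaves isConvergent at its default, to be
selected only when the call target is known not to be convergent. The name
SI_CALL_NONCONVERGENT is made up for illustration, and the operand list is
assumed to mirror SI_CALL's, which is not shown in this diff.

  // Hypothetical nonconvergent variant of SI_CALL; operand list assumed to
  // match SI_CALL. Because isConvergent stays 0, MachineBlockPlacement would
  // remain free to tail duplicate blocks containing this call.
  def SI_CALL_NONCONVERGENT : SPseudoInstSI <
    (outs SReg_64:$dst), (ins SSrc_b64:$src0, unknown:$callee)> {
    let isCall = 1;
    let UseNamedOperandTable = 1;
    let SchedRW = [WriteBranch];
    // isConvergent deliberately left at the default (0).
  }

Call lowering would then have to inspect the callee's convergent attribute to
pick between the two pseudos, which is what "base this on the call target"
in the TODO comments refers to.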