From 8adb73fcabba6aafd798f23e517f6290d5b04b4b Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Mon, 7 Jan 2019 12:20:35 +0000 Subject: [PATCH] Regenerate test. Prep work towards enabling SimplifyDemandedBits vector support for TRUNCATE as discussed on D56118. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@350513 91177308-0d34-0410-b5e6-96231b3b80d8 --- test/CodeGen/AMDGPU/widen-smrd-loads.ll | 454 ++++++++++++++++++++---- 1 file changed, 390 insertions(+), 64 deletions(-) diff --git a/test/CodeGen/AMDGPU/widen-smrd-loads.ll b/test/CodeGen/AMDGPU/widen-smrd-loads.ll index 9a2e4280ffb..c950e2d7cd3 100644 --- a/test/CodeGen/AMDGPU/widen-smrd-loads.ll +++ b/test/CodeGen/AMDGPU/widen-smrd-loads.ll @@ -1,11 +1,38 @@ -; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s -; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s +; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s -; GCN-LABEL: {{^}}widen_i16_constant_load: -; GCN: s_load_dword [[VAL:s[0-9]+]] -; GCN: s_addk_i32 [[VAL]], 0x3e7 -; GCN: s_or_b32 [[OR:s[0-9]+]], [[VAL]], 4 define amdgpu_kernel void @widen_i16_constant_load(i16 addrspace(4)* %arg) { +; SI-LABEL: widen_i16_constant_load: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_addk_i32 s0, 0x3e7 +; SI-NEXT: s_or_b32 s0, s0, 4 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_i16_constant_load: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_and_b32 s0, s0, 0xffff +; VI-NEXT: s_addk_i32 s0, 0x3e7 +; VI-NEXT: s_or_b32 s0, s0, 4 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: flat_store_short v[0:1], v2 +; VI-NEXT: s_endpgm %load = load i16, i16 addrspace(4)* %arg, align 4 %add = add i16 %load, 999 %or = or i16 %add, 4 @@ -13,12 +40,38 @@ define amdgpu_kernel void @widen_i16_constant_load(i16 addrspace(4)* %arg) { ret void } -; GCN-LABEL: {{^}}widen_i16_constant_load_zext_i32: -; GCN: s_load_dword [[VAL:s[0-9]+]] -; GCN: s_and_b32 [[TRUNC:s[0-9]+]], [[VAL]], 0xffff{{$}} -; GCN: s_addk_i32 [[TRUNC]], 0x3e7 -; GCN: s_or_b32 [[OR:s[0-9]+]], [[TRUNC]], 4 define amdgpu_kernel void @widen_i16_constant_load_zext_i32(i16 addrspace(4)* %arg) { +; SI-LABEL: widen_i16_constant_load_zext_i32: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: 
s_and_b32 s0, s0, 0xffff +; SI-NEXT: s_addk_i32 s0, 0x3e7 +; SI-NEXT: s_or_b32 s0, s0, 4 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_i16_constant_load_zext_i32: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_and_b32 s0, s0, 0xffff +; VI-NEXT: s_addk_i32 s0, 0x3e7 +; VI-NEXT: s_or_b32 s0, s0, 4 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm %load = load i16, i16 addrspace(4)* %arg, align 4 %ext = zext i16 %load to i32 %add = add i32 %ext, 999 @@ -27,12 +80,38 @@ define amdgpu_kernel void @widen_i16_constant_load_zext_i32(i16 addrspace(4)* %a ret void } -; GCN-LABEL: {{^}}widen_i16_constant_load_sext_i32: -; GCN: s_load_dword [[VAL:s[0-9]+]] -; GCN: s_sext_i32_i16 [[EXT:s[0-9]+]], [[VAL]] -; GCN: s_addk_i32 [[EXT]], 0x3e7 -; GCN: s_or_b32 [[OR:s[0-9]+]], [[EXT]], 4 define amdgpu_kernel void @widen_i16_constant_load_sext_i32(i16 addrspace(4)* %arg) { +; SI-LABEL: widen_i16_constant_load_sext_i32: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_sext_i32_i16 s0, s0 +; SI-NEXT: s_addk_i32 s0, 0x3e7 +; SI-NEXT: s_or_b32 s0, s0, 4 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_i16_constant_load_sext_i32: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_sext_i32_i16 s0, s0 +; VI-NEXT: s_addk_i32 s0, 0x3e7 +; VI-NEXT: s_or_b32 s0, s0, 4 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm %load = load i16, i16 addrspace(4)* %arg, align 4 %ext = sext i16 %load to i32 %add = add i32 %ext, 999 @@ -41,12 +120,46 @@ define amdgpu_kernel void @widen_i16_constant_load_sext_i32(i16 addrspace(4)* %a ret void } -; GCN-LABEL: {{^}}widen_i17_constant_load: -; GCN: s_load_dword [[VAL:s[0-9]+]] -; GCN: s_add_i32 [[ADD:s[0-9]+]], [[VAL]], 34 -; GCN: s_or_b32 [[OR:s[0-9]+]], [[ADD]], 4 -; GCN: s_bfe_u32 s{{[0-9]+}}, [[OR]], 0x10010 define amdgpu_kernel void @widen_i17_constant_load(i17 addrspace(4)* %arg) { +; SI-LABEL: widen_i17_constant_load: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_add_i32 s0, s0, 34 +; SI-NEXT: s_or_b32 s0, s0, 4 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: s_bfe_u32 s0, s0, 0x10010 +; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; SI-NEXT: s_mov_b32 s4, 2 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_i17_constant_load: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 
v2, 2 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: v_mov_b32_e32 v3, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_add_i32 s0, s0, 34 +; VI-NEXT: s_or_b32 s0, s0, 4 +; VI-NEXT: v_mov_b32_e32 v4, s0 +; VI-NEXT: s_bfe_u32 s0, s0, 0x10010 +; VI-NEXT: v_mov_b32_e32 v5, s0 +; VI-NEXT: flat_store_short v[0:1], v4 +; VI-NEXT: flat_store_byte v[2:3], v5 +; VI-NEXT: s_endpgm %load = load i17, i17 addrspace(4)* %arg, align 4 %add = add i17 %load, 34 %or = or i17 %add, 4 @@ -54,13 +167,34 @@ define amdgpu_kernel void @widen_i17_constant_load(i17 addrspace(4)* %arg) { ret void } -; GCN-LABEL: {{^}}widen_f16_constant_load: -; GCN: s_load_dword [[VAL:s[0-9]+]] -; SI: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], [[VAL]] -; SI: v_add_f32_e32 [[ADD:v[0-9]+]], 4.0, [[CVT]] - -; VI: v_add_f16_e64 [[ADD:v[0-9]+]], [[VAL]], 4.0 define amdgpu_kernel void @widen_f16_constant_load(half addrspace(4)* %arg) { +; SI-LABEL: widen_f16_constant_load: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_mov_b32 s1, 0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v0, s0 +; SI-NEXT: s_mov_b32 s0, 0 +; SI-NEXT: v_add_f32_e32 v0, 4.0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: buffer_store_short v0, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_f16_constant_load: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_add_f16_e64 v2, s0, 4.0 +; VI-NEXT: flat_store_short v[0:1], v2 +; VI-NEXT: s_endpgm %load = load half, half addrspace(4)* %arg, align 4 %add = fadd half %load, 4.0 store half %add, half addrspace(1)* null @@ -68,21 +202,49 @@ define amdgpu_kernel void @widen_f16_constant_load(half addrspace(4)* %arg) { } ; FIXME: valu usage on VI -; GCN-LABEL: {{^}}widen_v2i8_constant_load: -; GCN: s_load_dword [[VAL:s[0-9]+]] - -; SI: s_add_i32 -; SI: s_or_b32 -; SI: s_addk_i32 -; SI: s_and_b32 -; SI: s_or_b32 -; SI: s_or_b32 - -; VI: s_add_i32 -; VI: v_add_u32_sdwa -; VI: v_or_b32_sdwa -; VI: v_or_b32_e32 define amdgpu_kernel void @widen_v2i8_constant_load(<2 x i8> addrspace(4)* %arg) { +; SI-LABEL: widen_v2i8_constant_load: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_and_b32 s1, s0, 0xff00 +; SI-NEXT: s_and_b32 s0, s0, 0xffff +; SI-NEXT: s_add_i32 s0, s0, 12 +; SI-NEXT: s_or_b32 s0, s0, 4 +; SI-NEXT: s_addk_i32 s1, 0x2c00 +; SI-NEXT: s_and_b32 s0, s0, 0xff +; SI-NEXT: s_or_b32 s0, s0, s1 +; SI-NEXT: s_or_b32 s0, s0, 0x300 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_v2i8_constant_load: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 44 +; VI-NEXT: v_mov_b32_e32 v1, 3 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_and_b32 s1, s0, 0xffff +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: s_add_i32 s1, s1, 12 +; VI-NEXT: 
v_add_u32_sdwa v0, vcc, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; VI-NEXT: s_or_b32 s0, s1, 4
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: s_and_b32 s0, s0, 0xff
+; VI-NEXT: v_or_b32_e32 v2, s0, v0
+; VI-NEXT: v_mov_b32_e32 v0, 0
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: flat_store_short v[0:1], v2
+; VI-NEXT: s_endpgm
 %load = load <2 x i8>, <2 x i8> addrspace(4)* %arg, align 4
 %add = add <2 x i8> %load, <i8 12, i8 44>
 %or = or <2 x i8> %add, <i8 4, i8 3>
@@ -90,9 +252,41 @@ define amdgpu_kernel void @widen_v2i8_constant_load(<2 x i8> addrspace(4)* %arg)
 ret void
}
-; GCN-LABEL: {{^}}no_widen_i16_constant_divergent_load:
-; GCN: {{buffer|flat}}_load_ushort
define amdgpu_kernel void @no_widen_i16_constant_divergent_load(i16 addrspace(4)* %arg) {
+; SI-LABEL: no_widen_i16_constant_divergent_load:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT: v_mov_b32_e32 v1, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s1, 0
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x3e7, v0
+; SI-NEXT: v_or_b32_e32 v0, 4, v0
+; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: no_widen_i16_constant_divergent_load:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT: v_mov_b32_e32 v0, 0
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_ushort v2, v[2:3]
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: v_add_u16_e32 v2, 0x3e7, v2
+; VI-NEXT: v_or_b32_e32 v2, 4, v2
+; VI-NEXT: flat_store_short v[0:1], v2
+; VI-NEXT: s_endpgm
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = zext i32 %tid to i64
 %gep.arg = getelementptr inbounds i16, i16 addrspace(4)* %arg, i64 %tid.ext
@@ -103,22 +297,72 @@ define amdgpu_kernel void @no_widen_i16_constant_divergent_load(i16 addrspace(4)
 ret void
}
-; GCN-LABEL: {{^}}widen_i1_constant_load:
-; GCN: s_load_dword [[VAL:s[0-9]+]]
-; GCN: s_and_b32 {{s[0-9]+}}, [[VAL]], 1{{$}}
define amdgpu_kernel void @widen_i1_constant_load(i1 addrspace(4)* %arg) {
+; SI-LABEL: widen_i1_constant_load:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s5, 0
+; SI-NEXT: s_mov_b32 s4, 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s0, s[0:1], 0x0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s0, s0, 1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: widen_i1_constant_load:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT: v_mov_b32_e32 v0, 0
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s0, s[0:1], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s0, s0, 1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: flat_store_byte v[0:1], v2
+; VI-NEXT: s_endpgm
 %load = load i1, i1 addrspace(4)* %arg, align 4
 %and = and i1 %load, true
 store i1 %and, i1 addrspace(1)* null
 ret void
}
-; GCN-LABEL: {{^}}widen_i16_zextload_i64_constant_load:
-; GCN: 
s_load_dword [[VAL:s[0-9]+]] -; GCN: s_and_b32 [[TRUNC:s[0-9]+]], [[VAL]], 0xffff{{$}} -; GCN: s_addk_i32 [[TRUNC]], 0x3e7 -; GCN: s_or_b32 [[OR:s[0-9]+]], [[TRUNC]], 4 define amdgpu_kernel void @widen_i16_zextload_i64_constant_load(i16 addrspace(4)* %arg) { +; SI-LABEL: widen_i16_zextload_i64_constant_load: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_and_b32 s0, s0, 0xffff +; SI-NEXT: s_addk_i32 s0, 0x3e7 +; SI-NEXT: s_or_b32 s0, s0, 4 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_i16_zextload_i64_constant_load: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_and_b32 s0, s0, 0xffff +; VI-NEXT: s_addk_i32 s0, 0x3e7 +; VI-NEXT: s_or_b32 s0, s0, 4 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm %load = load i16, i16 addrspace(4)* %arg, align 4 %zext = zext i16 %load to i32 %add = add i32 %zext, 999 @@ -127,12 +371,40 @@ define amdgpu_kernel void @widen_i16_zextload_i64_constant_load(i16 addrspace(4) ret void } -; GCN-LABEL: {{^}}widen_i1_zext_to_i64_constant_load: -; GCN: s_load_dword [[VAL:s[0-9]+]] -; GCN: s_and_b32 [[AND:s[0-9]+]], [[VAL]], 1 -; GCN: s_add_u32 [[ADD:s[0-9]+]], [[AND]], 0x3e7 -; GCN: s_addc_u32 s{{[0-9]+}}, 0, 0 define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(i1 addrspace(4)* %arg) { +; SI-LABEL: widen_i1_zext_to_i64_constant_load: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_and_b32 s0, s0, 1 +; SI-NEXT: s_add_u32 s0, s0, 0x3e7 +; SI-NEXT: s_addc_u32 s1, 0, 0 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: v_mov_b32_e32 v1, s1 +; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_i1_zext_to_i64_constant_load: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_and_b32 s0, s0, 1 +; VI-NEXT: s_add_u32 s0, s0, 0x3e7 +; VI-NEXT: s_addc_u32 s1, 0, 0 +; VI-NEXT: v_mov_b32_e32 v3, s1 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3] +; VI-NEXT: s_endpgm %load = load i1, i1 addrspace(4)* %arg, align 4 %zext = zext i1 %load to i64 %add = add i64 %zext, 999 @@ -140,11 +412,39 @@ define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(i1 addrspace(4)* % ret void } -; GCN-LABEL: {{^}}widen_i16_constant32_load: -; GCN: s_load_dword [[VAL:s[0-9]+]] -; GCN: s_addk_i32 [[VAL]], 0x3e7 -; GCN: s_or_b32 [[OR:s[0-9]+]], [[VAL]], 4 define amdgpu_kernel void @widen_i16_constant32_load(i16 addrspace(6)* %arg) { +; SI-LABEL: widen_i16_constant32_load: +; SI: ; %bb.0: +; SI-NEXT: s_load_dword s0, s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s1, 0 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 
0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_addk_i32 s0, 0x3e7 +; SI-NEXT: s_or_b32 s0, s0, 4 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_i16_constant32_load: +; VI: ; %bb.0: +; VI-NEXT: s_load_dword s0, s[0:1], 0x24 +; VI-NEXT: s_mov_b32 s1, 0 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_and_b32 s0, s0, 0xffff +; VI-NEXT: s_addk_i32 s0, 0x3e7 +; VI-NEXT: s_or_b32 s0, s0, 4 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: flat_store_short v[0:1], v2 +; VI-NEXT: s_endpgm %load = load i16, i16 addrspace(6)* %arg, align 4 %add = add i16 %load, 999 %or = or i16 %add, 4 @@ -152,11 +452,37 @@ define amdgpu_kernel void @widen_i16_constant32_load(i16 addrspace(6)* %arg) { ret void } -; GCN-LABEL: {{^}}widen_i16_global_invariant_load: -; GCN: s_load_dword [[VAL:s[0-9]+]] -; GCN: s_addk_i32 [[VAL]], 0x3e7 -; GCN: s_or_b32 [[OR:s[0-9]+]], [[VAL]], 1 define amdgpu_kernel void @widen_i16_global_invariant_load(i16 addrspace(1)* %arg) { +; SI-LABEL: widen_i16_global_invariant_load: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 +; SI-NEXT: s_mov_b32 s5, 0 +; SI-NEXT: s_mov_b32 s4, 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s0, s[0:1], 0x0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_addk_i32 s0, 0x3e7 +; SI-NEXT: s_or_b32 s0, s0, 1 +; SI-NEXT: v_mov_b32_e32 v0, s0 +; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: widen_i16_global_invariant_load: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; VI-NEXT: v_mov_b32_e32 v0, 0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s0, s[0:1], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_and_b32 s0, s0, 0xffff +; VI-NEXT: s_addk_i32 s0, 0x3e7 +; VI-NEXT: s_or_b32 s0, s0, 1 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: flat_store_short v[0:1], v2 +; VI-NEXT: s_endpgm %load = load i16, i16 addrspace(1)* %arg, align 4, !invariant.load !0 %add = add i16 %load, 999 %or = or i16 %add, 1 -- 2.50.1
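
Regeneration note: the CHECK blocks above were produced by the script named in
the test header. A sketch of the invocation, assuming a build tree whose llc
sits at build/bin/llc (adjust the path for your setup):

  python utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
    test/CodeGen/AMDGPU/widen-smrd-loads.ll

The script reads the RUN lines embedded in the test, runs llc once per RUN
line, and rewrites the SI/VI assertion lines to match the current output,
which is what this commit does in bulk ahead of the D56118 codegen change.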