 bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
   const MachineFunction &MF) const {
-  // m0 is needed for the scalar store offset. m0 is unallocatable, so we can't
-  // create a virtual register for it during frame index elimination, so the
-  // scavenger is directly needed.
-  return MF.getFrameInfo().hasStackObjects() &&
-         MF.getSubtarget<GCNSubtarget>().hasScalarStores() &&
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  if (!MFI.hasStackObjects())
+    return false;
+
+  // The scavenger is used for large frames, which may require finding a free
+  // register for large offsets.
+  if (!isUInt<12>(MFI.getStackSize()))
+    return true;
+
+  // If using scalar stores for spills, m0 is needed for the scalar store
+  // offset (pre-GFX9). m0 is unallocatable, so we can't create a virtual
+  // register for it during frame index elimination, so the scavenger is
+  // directly needed.
+  return MF.getSubtarget<GCNSubtarget>().hasScalarStores() &&
          MF.getInfo<SIMachineFunctionInfo>()->hasSpilledSGPRs();
 }
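
For context on the new early-out: llvm::isUInt<12> asks whether the frame size fits in a 12-bit unsigned immediate, i.e. byte offsets 0 through 4095. A minimal standalone sketch of that boundary, including the wave64 scaling the tests below rely on (illustrative only, not part of the patch; it assumes nothing beyond llvm::isUInt from llvm/Support/MathExtras.h):

// Illustrative sketch, not part of the patch.
#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  // 4095 is the largest value representable in 12 unsigned bits, so a frame
  // of that size still skips the new large-frame scavenger path.
  assert(llvm::isUInt<12>(4095));
  // 4096 does not fit, so requiresFrameIndexReplacementScavenging()
  // returns true for such a frame.
  assert(!llvm::isUInt<12>(4096));
  // The tests below use wave-scaled scratch offsets: 0x40000 bytes of
  // per-wave scratch is 0x40000 / 64 = 4096 bytes per lane for wave64,
  // just past the immediate limit, while 0x3ff00 / 64 = 4092 still fits.
  assert(0x40000 / 64 == 4096 && 0x3ff00 / 64 == 4092);
  return 0;
}
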
   %aptr = getelementptr i32, i32 addrspace(5)* %buf, i32 1
   ; 0x40000 / 64 = 4096 (for wave64)
-  ; CHECK: s_add_u32 s7, s7, 0x40000
-  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s7 ; 4-byte Folded Spill
-  ; CHECK: s_sub_u32 s7, s7, 0x40000
+  ; CHECK: s_add_u32 s6, s7, 0x40000
+  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s6 ; 4-byte Folded Spill
   %a = load volatile i32, i32 addrspace(5)* %aptr
   ; Force %a to spill
   %bufv2 = bitcast i8 addrspace(5)* %alloca to <2 x i32> addrspace(5)*
   ; 0x3ff00 / 64 = 4092 (for wave64)
-  ; CHECK: s_add_u32 s7, s7, 0x3ff00
-  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s7 ; 4-byte Folded Spill
-  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s7 offset:4 ; 4-byte Folded Spill
-  ; CHECK: s_sub_u32 s7, s7, 0x3ff00
+  ; CHECK: s_add_u32 s6, s7, 0x3ff00
+  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s6 ; 4-byte Folded Spill
+  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s6 offset:4 ; 4-byte Folded Spill
   %aptr = getelementptr <2 x i32>, <2 x i32> addrspace(5)* %bufv2, i32 1
   %a = load volatile <2 x i32>, <2 x i32> addrspace(5)* %aptr
   %aptr = getelementptr i32, i32 addrspace(5)* %buf, i32 1
   ; 0x40000 / 64 = 4096 (for wave64)
-  ; CHECK: s_add_u32 s5, s5, 0x40000
-  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s5 ; 4-byte Folded Spill
-  ; CHECK: s_sub_u32 s5, s5, 0x40000
+  ; CHECK: s_add_u32 s6, s5, 0x40000
+  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s6 ; 4-byte Folded Spill
   %a = load volatile i32, i32 addrspace(5)* %aptr
   ; Force %a to spill
   %bufv2 = bitcast i8 addrspace(5)* %alloca to <2 x i32> addrspace(5)*
   ; 0x3ff00 / 64 = 4092 (for wave64)
-  ; CHECK: s_add_u32 s5, s5, 0x3ff00
-  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s5 ; 4-byte Folded Spill
-  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s5 offset:4 ; 4-byte Folded Spill
-  ; CHECK: s_sub_u32 s5, s5, 0x3ff00
+  ; CHECK: s_add_u32 s6, s5, 0x3ff00
+  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s6 ; 4-byte Folded Spill
+  ; CHECK: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s6 offset:4 ; 4-byte Folded Spill
   %aptr = getelementptr <2 x i32>, <2 x i32> addrspace(5)* %bufv2, i32 1
   %a = load volatile <2 x i32>, <2 x i32> addrspace(5)* %aptr
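
The updated checks above capture the effect of the scavenger change: rather than adding the large offset into the frame register in place and subtracting it back after the spill, the backend now materializes base + offset into a scavenged SGPR (s6 in these tests), so the trailing s_sub_u32 disappears and the frame register (s7 in the first test, s5 in the second) is left untouched. Schematically, with the concrete VGPR and resource-descriptor numbers elided as placeholders:

; Before: the frame register is clobbered and must be restored.
;   s_add_u32 s7, s7, 0x40000
;   buffer_store_dword v?, off, s[?:?], s7   ; 4-byte Folded Spill
;   s_sub_u32 s7, s7, 0x40000
; After: a scavenged SGPR carries the sum; no restore is needed.
;   s_add_u32 s6, s7, 0x40000
;   buffer_store_dword v?, off, s[?:?], s6   ; 4-byte Folded Spill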