From: Matt Arsenault Date: Tue, 16 Jul 2019 18:21:25 +0000 (+0000) Subject: AMDGPU: Replace store PatFrags X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=8daa536902752068708f1e418b1db9067544f349;p=llvm AMDGPU: Replace store PatFrags Convert the easy cases to formats understood by GlobalISel. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@366240 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/AMDGPU/AMDGPUInstructions.td b/lib/Target/AMDGPU/AMDGPUInstructions.td index d470b3cd514..61bc415c839 100644 --- a/lib/Target/AMDGPU/AMDGPUInstructions.td +++ b/lib/Target/AMDGPU/AMDGPUInstructions.td @@ -467,25 +467,48 @@ def atomic_load_64_#as : PatFrag<(ops node:$ptr), (atomic_load_64 node:$ptr)> { let MemoryVT = i64; } +def store_#as : PatFrag<(ops node:$val, node:$ptr), + (unindexedstore node:$val, node:$ptr)> { + let IsStore = 1; + let IsTruncStore = 0; +} + +// truncstore fragments. +def truncstore_#as : PatFrag<(ops node:$val, node:$ptr), + (unindexedstore node:$val, node:$ptr)> { + let IsStore = 1; + let IsTruncStore = 1; +} + +// TODO: We don't really need the truncstore here. We can use +// unindexedstore with MemoryVT directly, which will save an +// unnecessary check that the memory size is less than the value type +// in the generated matcher table. +def truncstorei8_#as : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr)> { + let IsStore = 1; + let MemoryVT = i8; +} + +def truncstorei16_#as : PatFrag<(ops node:$val, node:$ptr), + (truncstore node:$val, node:$ptr)> { + let IsStore = 1; + let MemoryVT = i16; +} + +defm atomic_store_#as : binary_atomic_op; + } // End let AddressSpaces = ... 
} // End foreach AddrSpace -def store_private : PrivateStore ; -def truncstorei8_private : PrivateStore; -def truncstorei16_private : PrivateStore ; + def store_hi16_private : StoreHi16 , PrivateAddress; def truncstorei8_hi16_private : StoreHi16, PrivateAddress; -def store_global : GlobalStore ; -def truncstorei8_global : GlobalStore ; -def truncstorei16_global : GlobalStore ; def store_atomic_global : GlobalStore; def truncstorei8_hi16_global : StoreHi16 , GlobalAddress; def truncstorei16_hi16_global : StoreHi16 , GlobalAddress; -def store_local : LocalStore ; -def truncstorei8_local : LocalStore ; -def truncstorei16_local : LocalStore ; def store_local_hi16 : StoreHi16 , LocalAddress; def truncstorei8_local_hi16 : StoreHi16, LocalAddress; def atomic_store_local : LocalStore ; @@ -506,9 +529,6 @@ def store_align16_local : Aligned16Bytes < (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr) >; -def store_flat : FlatStore ; -def truncstorei8_flat : FlatStore ; -def truncstorei16_flat : FlatStore ; def atomic_store_flat : FlatStore ; def truncstorei8_hi16_flat : StoreHi16, FlatStoreAddress; def truncstorei16_hi16_flat : StoreHi16, FlatStoreAddress; diff --git a/lib/Target/AMDGPU/FLATInstructions.td b/lib/Target/AMDGPU/FLATInstructions.td index 9d541560613..8ddf4e2aa2b 100644 --- a/lib/Target/AMDGPU/FLATInstructions.td +++ b/lib/Target/AMDGPU/FLATInstructions.td @@ -792,8 +792,8 @@ def : FlatStorePat ; def : FlatStorePat ; def : FlatStorePat ; -def : FlatStoreAtomicPat ; -def : FlatStoreAtomicPat ; +def : FlatStoreAtomicPat ; +def : FlatStoreAtomicPat ; def : FlatAtomicPat ; def : FlatAtomicPat ;