-; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=SI --check-prefix=FUNC %s
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=VI --check-prefix=FUNC %s
-
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}atomic_add_i32_offset:
; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32_soffset:
+; GCN: s_mov_b32 [[SREG:s[0-9]+]], 0x8ca0
+; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], [[SREG]]{{$}}
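+; Note: the byte offset is 9000 * 4 = 36000 = 0x8ca0, which does not fit in
+; the 12-bit MUBUF immediate offset field, so it is materialized in an SGPR
+; and applied through the soffset operand instead.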
+define void @atomic_add_i32_soffset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 9000
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_add_i32_huge_offset:
+; SI-DAG: v_mov_b32_e32 v[[PTRLO:[0-9]+]], 0xdeac
+; SI-DAG: v_mov_b32_e32 v[[PTRHI:[0-9]+]], 0xabcd
+; SI: buffer_atomic_add v{{[0-9]+}}, v{{\[}}[[PTRLO]]:[[PTRHI]]{{\]}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
+; VI: flat_atomic_add
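+; Note: the byte offset is 47224239175595 * 4 = 0xabcd0000deac, too wide even
+; for a 32-bit soffset, so the address is computed into a 64-bit VGPR pair
+; (low half 0xdeac, high half 0xabcd) and used with addr64 on SI; VI selects
+; a flat atomic instead.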
+define void @atomic_add_i32_huge_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 47224239175595
+
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_add_i32_addr64_offset:
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
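+; Note: VI removes the MUBUF addr64 addressing mode, which is why the 64-bit
+; indexed cases expect flat atomics on VI but buffer atomics on SI.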
-
define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_add_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_and v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_and v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_sub v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_sub v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_sub_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_smax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_smax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_umax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_umax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_smin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_smin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_umin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_umin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_or v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_or v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_or_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_swap v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
+
+; VI: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
+
; VI: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
; GCN: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_swap v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
-; CMP_SWAP
-
; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_offset:
; GCN: buffer_atomic_cmpswap v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
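+; Note: buffer_atomic_cmpswap takes a register pair holding the new value and
+; the compare value; in the _ret variants the original memory value comes back
+; in the low register of that pair, hence the v[[RET]] sub-register checks below.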
define void @atomic_cmpxchg_i32_offset(i32 addrspace(1)* %out, i32 %in, i32 %old) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
ret void
}
; GCN: buffer_store_dword v[[RET]]
define void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
- %1 = extractvalue { i32, i1 } %0, 0
- store i32 %1, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+ %extract0 = extractvalue { i32, i1 } %val, 0
+ store i32 %extract0, i32 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_addr64_offset:
; SI: buffer_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
+
+; VI: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
define void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
ret void
}
define void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
- %1 = extractvalue { i32, i1 } %0, 0
- store i32 %1, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
+ %extract0 = extractvalue { i32, i1 } %val, 0
+ store i32 %extract0, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_cmpxchg_i32(i32 addrspace(1)* %out, i32 %in, i32 %old) {
entry:
- %0 = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
+ %val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
ret void
}
; GCN: buffer_store_dword v[[RET]]
define void @atomic_cmpxchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
entry:
- %0 = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
- %1 = extractvalue { i32, i1 } %0, 0
- store i32 %1, i32 addrspace(1)* %out2
+ %val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
+ %extract0 = extractvalue { i32, i1 } %val, 0
+ store i32 %extract0, i32 addrspace(1)* %out2
ret void
}
define void @atomic_cmpxchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
+ %val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
ret void
}
define void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
- %1 = extractvalue { i32, i1 } %0, 0
- store i32 %1, i32 addrspace(1)* %out2
+ %val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
+ %extract0 = extractvalue { i32, i1 } %val, 0
+ store i32 %extract0, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_xor v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
- %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_xor v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_xor_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
- store i32 %0, i32 addrspace(1)* %out2
+ %val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
ret void
}
-; ATOMIC_LOAD
; FUNC-LABEL: {{^}}atomic_load_i32_offset:
; SI: buffer_load_dword [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
; GCN: buffer_store_dword [[RET]]
define void @atomic_load_i32_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %in, i32 4
- %0 = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
- store i32 %0, i32 addrspace(1)* %out
+ %gep = getelementptr i32, i32 addrspace(1)* %in, i64 4
+ %val = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
+ store i32 %val, i32 addrspace(1)* %out
ret void
}
; GCN: buffer_store_dword [[RET]]
define void @atomic_load_i32(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
entry:
- %0 = load atomic i32, i32 addrspace(1)* %in seq_cst, align 4
- store i32 %0, i32 addrspace(1)* %out
+ %val = load atomic i32, i32 addrspace(1)* %in seq_cst, align 4
+ store i32 %val, i32 addrspace(1)* %out
ret void
}
define void @atomic_load_i32_addr64_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %in, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
- %0 = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
- store i32 %0, i32 addrspace(1)* %out
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
+ store i32 %val, i32 addrspace(1)* %out
ret void
}
define void @atomic_load_i32_addr64(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %in, i64 %index
- %0 = load atomic i32, i32 addrspace(1)* %ptr seq_cst, align 4
- store i32 %0, i32 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_load_i64_offset:
-; SI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
-; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
-; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
-entry:
- %gep = getelementptr i64, i64 addrspace(1)* %in, i64 4
- %0 = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
- store i64 %0, i64 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_load_i64:
-; SI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
-; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc
-; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
-entry:
- %0 = load atomic i64, i64 addrspace(1)* %in seq_cst, align 8
- store i64 %0, i64 addrspace(1)* %out
+ %val = load atomic i32, i32 addrspace(1)* %ptr seq_cst, align 4
+ store i32 %val, i32 addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}atomic_load_i64_addr64_offset:
-; SI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
-; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
-; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_addr64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
-entry:
- %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
- %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %0 = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
- store i64 %0, i64 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_load_i64_addr64:
-; SI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
-; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
-; GCN: buffer_store_dwordx2 [[RET]]
-define void @atomic_load_i64_addr64(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
-entry:
- %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
- %0 = load atomic i64, i64 addrspace(1)* %ptr seq_cst, align 8
- store i64 %0, i64 addrspace(1)* %out
- ret void
-}
-
-; ATOMIC_STORE
; FUNC-LABEL: {{^}}atomic_store_i32_offset:
; SI: buffer_store_dword {{v[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32_offset(i32 %in, i32 addrspace(1)* %out) {
entry:
- %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
ret void
}
define void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(1)* %out, i64 %index) {
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
ret void
}
store atomic i32 %in, i32 addrspace(1)* %ptr seq_cst, align 4
ret void
}
-
-; FUNC-LABEL: {{^}}atomic_store_i64_offset:
-; SI: buffer_store_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
-; VI: flat_store_dwordx2 [[RET:v\[[0-9]+:[0-9]\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_offset(i64 %in, i64 addrspace(1)* %out) {
-entry:
- %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_store_i64:
-; SI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
-; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}] glc
-define void @atomic_store_i64(i64 %in, i64 addrspace(1)* %out) {
-entry:
- store atomic i64 %in, i64 addrspace(1)* %out seq_cst, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_store_i64_addr64_offset:
-; SI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
-; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(1)* %out, i64 %index) {
-entry:
- %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
- ret void
-}
-
-; FUNC-LABEL: {{^}}atomic_store_i64_addr64:
-; SI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
-; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
-define void @atomic_store_i64_addr64(i64 %in, i64 addrspace(1)* %out, i64 %index) {
-entry:
- %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- store atomic i64 %in, i64 addrspace(1)* %ptr seq_cst, align 8
- ret void
-}
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-
; GCN-LABEL: {{^}}atomic_add_i64_offset:
; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
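; Note: the immediate offset is 4 elements * 8 bytes = 32 for the i64 stride.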
define void @atomic_add_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
define void @atomic_add_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_add_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_add_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_add_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
define void @atomic_add_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
define void @atomic_add_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile add i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
define void @atomic_and_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
define void @atomic_and_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_and_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_and_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_and_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
define void @atomic_and_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
define void @atomic_and_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile and i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
define void @atomic_sub_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
define void @atomic_sub_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_sub_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_sub_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_sub_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
define void @atomic_sub_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
define void @atomic_sub_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile sub i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
define void @atomic_max_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
define void @atomic_max_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; GCN: buffer_atomic_smax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_max_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_max_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
define void @atomic_max_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
define void @atomic_max_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
  %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
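; FUNC-LABEL: {{^}}atomic_umax_i64_offset:
; GCN: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}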
define void @atomic_umax_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
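; FUNC-LABEL: {{^}}atomic_umax_i64_ret_offset:
; GCN: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]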
define void @atomic_umax_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
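; FUNC-LABEL: {{^}}atomic_umax_i64_addr64_offset:
; CI: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}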
define void @atomic_umax_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
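; FUNC-LABEL: {{^}}atomic_umax_i64_ret_addr64_offset:
; CI: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]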
define void @atomic_umax_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_umax_i64:
; GCN: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umax_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_umax_i64_ret:
; GCN: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_umax_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
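; FUNC-LABEL: {{^}}atomic_umax_i64_addr64:
; CI: buffer_atomic_umax_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_umax_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}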
define void @atomic_umax_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
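; FUNC-LABEL: {{^}}atomic_umax_i64_ret_addr64:
; CI: buffer_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umax_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]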
define void @atomic_umax_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
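; FUNC-LABEL: {{^}}atomic_min_i64_offset:
; GCN: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}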
define void @atomic_min_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
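; FUNC-LABEL: {{^}}atomic_min_i64_ret_offset:
; GCN: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]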
define void @atomic_min_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
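; FUNC-LABEL: {{^}}atomic_min_i64_addr64_offset:
; CI: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}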
define void @atomic_min_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
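; FUNC-LABEL: {{^}}atomic_min_i64_ret_addr64_offset:
; CI: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]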
define void @atomic_min_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_min_i64:
; GCN: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_min_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_min_i64_ret:
; GCN: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_min_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
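; FUNC-LABEL: {{^}}atomic_min_i64_addr64:
; CI: buffer_atomic_smin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_smin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}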
define void @atomic_min_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
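; FUNC-LABEL: {{^}}atomic_min_i64_ret_addr64:
; CI: buffer_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_smin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]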
define void @atomic_min_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
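; FUNC-LABEL: {{^}}atomic_umin_i64_offset:
; GCN: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}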
define void @atomic_umin_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
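; FUNC-LABEL: {{^}}atomic_umin_i64_ret_offset:
; GCN: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]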
define void @atomic_umin_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
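; FUNC-LABEL: {{^}}atomic_umin_i64_addr64_offset:
; CI: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}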
define void @atomic_umin_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
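; FUNC-LABEL: {{^}}atomic_umin_i64_ret_addr64_offset:
; CI: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]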
define void @atomic_umin_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_umin_i64:
; GCN: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umin_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_umin_i64_ret:
; GCN: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_umin_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
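; FUNC-LABEL: {{^}}atomic_umin_i64_addr64:
; CI: buffer_atomic_umin_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_umin_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}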
define void @atomic_umin_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
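; FUNC-LABEL: {{^}}atomic_umin_i64_ret_addr64:
; CI: buffer_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umin_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]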
define void @atomic_umin_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
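; FUNC-LABEL: {{^}}atomic_or_i64_offset:
; GCN: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}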
define void @atomic_or_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
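; FUNC-LABEL: {{^}}atomic_or_i64_ret_offset:
; GCN: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]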
define void @atomic_or_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
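; FUNC-LABEL: {{^}}atomic_or_i64_addr64_offset:
; CI: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}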
define void @atomic_or_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
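; FUNC-LABEL: {{^}}atomic_or_i64_ret_addr64_offset:
; CI: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]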
define void @atomic_or_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_or_i64:
; GCN: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_or_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_or_i64_ret:
; GCN: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_or_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
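; FUNC-LABEL: {{^}}atomic_or_i64_addr64:
; CI: buffer_atomic_or_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_or_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}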
define void @atomic_or_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
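; FUNC-LABEL: {{^}}atomic_or_i64_ret_addr64:
; CI: buffer_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_or_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]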
define void @atomic_or_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile or i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
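; FUNC-LABEL: {{^}}atomic_xchg_i64_offset:
; GCN: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}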
define void @atomic_xchg_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
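; FUNC-LABEL: {{^}}atomic_xchg_i64_ret_offset:
; GCN: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]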
define void @atomic_xchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i64_addr64_offset:
; CI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
+; VI: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}
define void @atomic_xchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
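; FUNC-LABEL: {{^}}atomic_xchg_i64_ret_addr64_offset:
; CI: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]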
define void @atomic_xchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i64:
; GCN: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xchg_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_xchg_i64_ret:
; GCN: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
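; FUNC-LABEL: {{^}}atomic_xchg_i64_addr64:
; CI: buffer_atomic_swap_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_swap_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}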
define void @atomic_xchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
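; FUNC-LABEL: {{^}}atomic_xchg_i64_ret_addr64:
; CI: buffer_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_swap_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]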
define void @atomic_xchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile xchg i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
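; FUNC-LABEL: {{^}}atomic_xor_i64_offset:
; GCN: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}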
define void @atomic_xor_i64_offset(i64 addrspace(1)* %out, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
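; FUNC-LABEL: {{^}}atomic_xor_i64_ret_offset:
; GCN: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]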
define void @atomic_xor_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
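; FUNC-LABEL: {{^}}atomic_xor_i64_addr64_offset:
; CI: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
; VI: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}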
define void @atomic_xor_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
ret void
}
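; FUNC-LABEL: {{^}}atomic_xor_i64_ret_addr64_offset:
; CI: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
; VI: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]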
define void @atomic_xor_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %gep, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; FUNC-LABEL: {{^}}atomic_xor_i64:
; GCN: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xor_i64(i64 addrspace(1)* %out, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
ret void
}
; FUNC-LABEL: {{^}}atomic_xor_i64_ret:
; GCN: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]
define void @atomic_xor_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
entry:
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %out, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
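; FUNC-LABEL: {{^}}atomic_xor_i64_addr64:
; CI: buffer_atomic_xor_x2 v{{\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_xor_x2 v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}}{{$}}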
define void @atomic_xor_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
ret void
}
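; FUNC-LABEL: {{^}}atomic_xor_i64_ret_addr64:
; CI: buffer_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_xor_x2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}], v{{\[[0-9]+:[0-9]+\]}} glc{{$}}
; GCN: buffer_store_dwordx2 [[RET]]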
define void @atomic_xor_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%tmp0 = atomicrmw volatile xor i64 addrspace(1)* %ptr, i64 %in seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
+
+; FUNC-LABEL: {{^}}atomic_load_i64_offset:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %in, i64 4
+ %val = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
+ store i64 %val, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_load_i64:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %out) {
+entry:
+ %val = load atomic i64, i64 addrspace(1)* %in seq_cst, align 8
+ store i64 %val, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_load_i64_addr64_offset:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_addr64_offset(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %val = load atomic i64, i64 addrspace(1)* %gep seq_cst, align 8
+ store i64 %val, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_load_i64_addr64:
+; CI: buffer_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_load_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 [[RET]]
+define void @atomic_load_i64_addr64(i64 addrspace(1)* %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %in, i64 %index
+ %val = load atomic i64, i64 addrspace(1)* %ptr seq_cst, align 8
+ store i64 %val, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i64_offset:
+; CI: buffer_store_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; VI: flat_store_dwordx2 [[RET:v\[[0-9]+:[0-9]+\]]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_offset(i64 %in, i64 addrspace(1)* %out) {
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i64:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}] glc
+define void @atomic_store_i64(i64 %in, i64 addrspace(1)* %out) {
+entry:
+ store atomic i64 %in, i64 addrspace(1)* %out seq_cst, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i64_addr64_offset:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_addr64_offset(i64 %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ store atomic i64 %in, i64 addrspace(1)* %gep seq_cst, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_store_i64_addr64:
+; CI: buffer_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}] glc{{$}}
+define void @atomic_store_i64_addr64(i64 %in, i64 addrspace(1)* %out, i64 %index) {
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ store atomic i64 %in, i64 addrspace(1)* %ptr seq_cst, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_offset:
+; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32{{$}}
+define void @atomic_cmpxchg_i64_offset(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_offset:
+; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]{{:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:32 glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+ %extract0 = extractvalue { i64, i1 } %val, 0
+ store i64 %extract0, i64 addrspace(1)* %out2
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_addr64_offset:
+; CI: buffer_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32{{$}}
+; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
+define void @atomic_cmpxchg_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64_offset:
+; CI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:32 glc{{$}}
+; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %val = cmpxchg volatile i64 addrspace(1)* %gep, i64 %old, i64 %in seq_cst seq_cst
+ %extract0 = extractvalue { i64, i1 } %val, 0
+ store i64 %extract0, i64 addrspace(1)* %out2
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64:
+; GCN: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
+define void @atomic_cmpxchg_i64(i64 addrspace(1)* %out, i64 %in, i64 %old) {
+entry:
+ %val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret:
+; GCN: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %old) {
+entry:
+ %val = cmpxchg volatile i64 addrspace(1)* %out, i64 %old, i64 %in seq_cst seq_cst
+ %extract0 = extractvalue { i64, i1 } %val, 0
+ store i64 %extract0, i64 addrspace(1)* %out2
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_addr64:
+; CI: buffer_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
+; VI: flat_atomic_cmpswap_x2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
+define void @atomic_cmpxchg_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index, i64 %old) {
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
+ ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_cmpxchg_i64_ret_addr64:
+; CI: buffer_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; VI: flat_atomic_cmpswap_x2 v{{\[}}[[RET:[0-9]+]]:{{[0-9]+\]}}, v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
+; GCN: buffer_store_dwordx2 v{{\[}}[[RET]]:
+define void @atomic_cmpxchg_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index, i64 %old) {
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %val = cmpxchg volatile i64 addrspace(1)* %ptr, i64 %old, i64 %in seq_cst seq_cst
+ %extract0 = extractvalue { i64, i1 } %val, 0
+ store i64 %extract0, i64 addrspace(1)* %out2
+ ret void
+}