; RUN: | FileCheck -check-prefix=RV32IA %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IA %s
define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) {
; RV32I-LABEL: cmpxchg_i8_monotonic_monotonic:
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a3, a0, 3
; RV64IA-NEXT: slli a3, a3, 3
; RV64IA-NEXT: addi a4, zero, 255
; RV64IA-NEXT: sllw a4, a4, a3
; RV64IA-NEXT: andi a2, a2, 255
; RV64IA-NEXT: sllw a2, a2, a3
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a3
; RV64IA-NEXT: andi a0, a0, -4
; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a3, (a0)
; RV64IA-NEXT: and a5, a3, a4
; RV64IA-NEXT: bne a5, a1, .LBB0_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB0_1 Depth=1
; RV64IA-NEXT: xor a5, a3, a2
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: sc.w a5, a5, (a0)
; RV64IA-NEXT: bnez a5, .LBB0_1
; RV64IA-NEXT: .LBB0_3:
; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_acquire_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB1_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB1_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB1_1
+; RV64IA-NEXT: .LBB1_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_acquire_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB2_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB2_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB2_1
+; RV64IA-NEXT: .LBB2_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_release_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB3_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB3_1
+; RV64IA-NEXT: .LBB3_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_release_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB4_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB4_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB4_1
+; RV64IA-NEXT: .LBB4_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_acq_rel_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB5_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB5_1
+; RV64IA-NEXT: .LBB5_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_acq_rel_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB6_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB6_1
+; RV64IA-NEXT: .LBB6_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_seq_cst_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB7_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB7_1
+; RV64IA-NEXT: .LBB7_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_seq_cst_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB8_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB8_1
+; RV64IA-NEXT: .LBB8_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i8_seq_cst_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a4, a4, a3
+; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
+; RV64IA-NEXT: bne a5, a1, .LBB9_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1
+; RV64IA-NEXT: xor a5, a3, a2
+; RV64IA-NEXT: and a5, a5, a4
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB9_1
+; RV64IA-NEXT: .LBB9_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB10_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB10_1
+; RV64IA-NEXT: .LBB10_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_acquire_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB11_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB11_1
+; RV64IA-NEXT: .LBB11_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_acquire_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB12_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB12_1
+; RV64IA-NEXT: .LBB12_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_release_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB13_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB13_1
+; RV64IA-NEXT: .LBB13_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_release_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB14_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB14_1
+; RV64IA-NEXT: .LBB14_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_acq_rel_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB15_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB15_1
+; RV64IA-NEXT: .LBB15_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_acq_rel_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB16_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB16_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB16_1
+; RV64IA-NEXT: .LBB16_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_seq_cst_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB17_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB17_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB17_1
+; RV64IA-NEXT: .LBB17_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_seq_cst_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB18_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB18_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB18_1
+; RV64IA-NEXT: .LBB18_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i16_seq_cst_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: andi a4, a0, 3
+; RV64IA-NEXT: slli a4, a4, 3
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB19_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB19_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB19_1
+; RV64IA-NEXT: .LBB19_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_monotonic_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB20_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB20_1 Depth=1
+; RV64IA-NEXT: sc.w a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB20_1
+; RV64IA-NEXT: .LBB20_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_acquire_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB21_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB21_1 Depth=1
+; RV64IA-NEXT: sc.w a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB21_1
+; RV64IA-NEXT: .LBB21_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_acquire_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB22_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB22_1 Depth=1
+; RV64IA-NEXT: sc.w a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB22_1
+; RV64IA-NEXT: .LBB22_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_release_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB23_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB23_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB23_1
+; RV64IA-NEXT: .LBB23_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_release_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB24_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB24_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB24_1
+; RV64IA-NEXT: .LBB24_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_acq_rel_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB25_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB25_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB25_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB25_1
+; RV64IA-NEXT: .LBB25_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_acq_rel_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB26_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB26_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB26_1
+; RV64IA-NEXT: .LBB26_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_seq_cst_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB27_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB27_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB27_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB27_1
+; RV64IA-NEXT: .LBB27_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_seq_cst_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB28_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB28_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB28_1
+; RV64IA-NEXT: .LBB28_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i32_seq_cst_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB29_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB29_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB29_1
+; RV64IA-NEXT: .LBB29_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_monotonic_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB30_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB30_1 Depth=1
+; RV64IA-NEXT: sc.d a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB30_1
+; RV64IA-NEXT: .LBB30_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_acquire_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aq a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB31_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB31_1 Depth=1
+; RV64IA-NEXT: sc.d a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB31_1
+; RV64IA-NEXT: .LBB31_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_acquire_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aq a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB32_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB32_1 Depth=1
+; RV64IA-NEXT: sc.d a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB32_1
+; RV64IA-NEXT: .LBB32_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_release_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB33_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB33_1 Depth=1
+; RV64IA-NEXT: sc.d.rl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB33_1
+; RV64IA-NEXT: .LBB33_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_release_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB34_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB34_1 Depth=1
+; RV64IA-NEXT: sc.d.rl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB34_1
+; RV64IA-NEXT: .LBB34_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_acq_rel_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aq a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB35_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1
+; RV64IA-NEXT: sc.d.rl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB35_1
+; RV64IA-NEXT: .LBB35_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_acq_rel_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aq a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB36_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1
+; RV64IA-NEXT: sc.d.rl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB36_1
+; RV64IA-NEXT: .LBB36_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_seq_cst_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aqrl a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB37_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1
+; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB37_1
+; RV64IA-NEXT: .LBB37_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_seq_cst_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aqrl a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB38_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1
+; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB38_1
+; RV64IA-NEXT: .LBB38_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
ret void
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: cmpxchg_i64_seq_cst_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aqrl a3, (a0)
+; RV64IA-NEXT: bne a3, a1, .LBB39_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1
+; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0)
+; RV64IA-NEXT: bnez a4, .LBB39_1
+; RV64IA-NEXT: .LBB39_3:
+; RV64IA-NEXT: ret
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
ret void
}
; RUN: | FileCheck -check-prefix=RV32IA %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IA %s
define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) {
; RV32I-LABEL: atomicrmw_xchg_i8_monotonic:
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, 3
; RV64IA-NEXT: slli a2, a2, 3
; RV64IA-NEXT: addi a3, zero, 255
; RV64IA-NEXT: sllw a3, a3, a2
; RV64IA-NEXT: andi a1, a1, 255
; RV64IA-NEXT: sllw a1, a1, a2
; RV64IA-NEXT: andi a0, a0, -4
; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
; RV64IA-NEXT: lr.w a4, (a0)
; RV64IA-NEXT: add a5, zero, a1
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: sc.w a5, a5, (a0)
; RV64IA-NEXT: bnez a5, .LBB0_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a2
; RV64IA-NEXT: ret
%1 = atomicrmw xchg i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB1_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB2_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB3_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB4_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB5_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw add i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB6_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw add i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB7_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw add i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB8_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw add i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB9_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw add i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB10_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB11_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB12_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB13_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB14_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sll a3, a3, a2
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: or a1, a3, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw and i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sll a3, a3, a2
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: or a1, a3, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w.aq a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw and i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sll a3, a3, a2
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: or a1, a3, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w.rl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw and i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sll a3, a3, a2
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: or a1, a3, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw and i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sll a3, a3, a2
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: or a1, a3, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw and i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB20_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB21_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB22_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB23_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a3, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB24_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w.aq a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w.rl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB35_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB35_3: # in Loop: Header=BB35_1 Depth=1
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB35_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB36_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB36_3: # in Loop: Header=BB36_1 Depth=1
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB36_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB37_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB37_3: # in Loop: Header=BB37_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB37_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB38_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB38_3: # in Loop: Header=BB38_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB38_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB39_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB39_3: # in Loop: Header=BB39_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB39_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB40_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB40_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB40_3: # in Loop: Header=BB40_1 Depth=1
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB40_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB41_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB41_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB41_3: # in Loop: Header=BB41_1 Depth=1
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB41_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB42_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB42_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB42_3: # in Loop: Header=BB42_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB42_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB43_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB43_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB43_3: # in Loop: Header=BB43_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB43_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 56
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: addi a4, zero, 255
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 56
+; RV64IA-NEXT: srai a1, a1, 56
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB44_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB44_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB44_3: # in Loop: Header=BB44_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB44_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a3, a1, .LBB45_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB45_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a3, a1, .LBB46_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB46_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB46_3: # in Loop: Header=BB46_1 Depth=1
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB46_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a3, a1, .LBB47_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB47_3: # in Loop: Header=BB47_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB47_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a3, a1, .LBB48_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB48_3: # in Loop: Header=BB48_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB48_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a3, a1, .LBB49_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB49_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i8_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a3, .LBB50_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB50_3: # in Loop: Header=BB50_1 Depth=1
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB50_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i8* %a, i8 %b monotonic
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i8_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a3, .LBB51_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB51_3: # in Loop: Header=BB51_1 Depth=1
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB51_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i8* %a, i8 %b acquire
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i8_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a3, .LBB52_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB52_3: # in Loop: Header=BB52_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB52_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i8* %a, i8 %b release
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i8_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a3, .LBB53_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB53_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB53_3: # in Loop: Header=BB53_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB53_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i8* %a, i8 %b acq_rel
ret i8 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i8_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: addi a3, zero, 255
+; RV64IA-NEXT: sllw a6, a3, a2
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a3, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a3, .LBB54_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB54_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB54_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i8* %a, i8 %b seq_cst
ret i8 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB55_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB56_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB57_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB58_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: add a5, zero, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB59_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB60_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw add i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB61_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw add i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB62_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw add i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB63_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw add i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: add a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB64_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw add i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB65_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB66_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB67_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB68_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB69_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: sub a5, a4, a1
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB69_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sll a1, a1, a3
+; RV64IA-NEXT: sll a2, a2, a3
+; RV64IA-NEXT: not a2, a2
+; RV64IA-NEXT: or a1, a2, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw and i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sll a1, a1, a3
+; RV64IA-NEXT: sll a2, a2, a3
+; RV64IA-NEXT: not a2, a2
+; RV64IA-NEXT: or a1, a2, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w.aq a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw and i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sll a1, a1, a3
+; RV64IA-NEXT: sll a2, a2, a3
+; RV64IA-NEXT: not a2, a2
+; RV64IA-NEXT: or a1, a2, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w.rl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw and i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sll a1, a1, a3
+; RV64IA-NEXT: sll a2, a2, a3
+; RV64IA-NEXT: not a2, a2
+; RV64IA-NEXT: or a1, a2, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw and i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sll a1, a1, a3
+; RV64IA-NEXT: sll a2, a2, a3
+; RV64IA-NEXT: not a2, a2
+; RV64IA-NEXT: or a1, a2, a1
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw and i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB75_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB75_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB76_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB76_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB77_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB77_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB78_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB79_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a5, a4, a1
+; RV64IA-NEXT: not a5, a5
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: and a5, a5, a2
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB79_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w.aq a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w.rl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw or i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a2, a0, 3
+; RV64IA-NEXT: slli a2, a2, 3
+; RV64IA-NEXT: sll a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: srlw a0, a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB90_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB90_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB90_3: # in Loop: Header=BB90_1 Depth=1
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB90_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB91_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB91_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB91_3: # in Loop: Header=BB91_1 Depth=1
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB91_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB92_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB92_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB92_3: # in Loop: Header=BB92_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB92_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB93_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB93_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB93_3: # in Loop: Header=BB93_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB93_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a4, a1, .LBB94_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB94_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB94_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw max i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB95_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB95_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB95_3: # in Loop: Header=BB95_1 Depth=1
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB95_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB96_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB96_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB96_3: # in Loop: Header=BB96_1 Depth=1
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB96_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB97_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB97_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB97_3: # in Loop: Header=BB97_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB97_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB98_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB98_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB98_3: # in Loop: Header=BB98_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB98_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: slli a2, a0, 3
+; RV64IA-NEXT: andi a2, a2, 24
+; RV64IA-NEXT: addi a3, zero, 48
+; RV64IA-NEXT: sub a6, a3, a2
+; RV64IA-NEXT: lui a4, 16
+; RV64IA-NEXT: addiw a4, a4, -1
+; RV64IA-NEXT: sllw a7, a4, a2
+; RV64IA-NEXT: slli a1, a1, 48
+; RV64IA-NEXT: srai a1, a1, 48
+; RV64IA-NEXT: sllw a1, a1, a2
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a5, (a0)
+; RV64IA-NEXT: and a4, a5, a7
+; RV64IA-NEXT: mv a3, a5
+; RV64IA-NEXT: sll a4, a4, a6
+; RV64IA-NEXT: sra a4, a4, a6
+; RV64IA-NEXT: bge a1, a4, .LBB99_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB99_1 Depth=1
+; RV64IA-NEXT: xor a3, a5, a1
+; RV64IA-NEXT: and a3, a3, a7
+; RV64IA-NEXT: xor a3, a5, a3
+; RV64IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB99_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a5, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw min i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB100_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a2, a1, .LBB100_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB100_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB100_3: # in Loop: Header=BB100_1 Depth=1
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB100_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB101_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a2, a1, .LBB101_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB101_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB101_3: # in Loop: Header=BB101_1 Depth=1
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB101_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB102_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a2, a1, .LBB102_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB102_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB102_3: # in Loop: Header=BB102_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB102_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 72(sp)
; RV64I-NEXT: addi sp, sp, 80
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB103_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a2, a1, .LBB103_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB103_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB103_3: # in Loop: Header=BB103_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB103_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB104_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a2, a1, .LBB104_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB104_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB104_3: # in Loop: Header=BB104_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB104_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i16_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB105_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a2, .LBB105_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB105_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB105_3: # in Loop: Header=BB105_1 Depth=1
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB105_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i16* %a, i16 %b monotonic
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i16_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB106_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a2, .LBB106_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB106_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB106_3: # in Loop: Header=BB106_1 Depth=1
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB106_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i16* %a, i16 %b acquire
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i16_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB107_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a2, .LBB107_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB107_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB107_3: # in Loop: Header=BB107_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB107_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i16* %a, i16 %b release
ret i16 %1
}
; RV64I-NEXT: ld ra, 72(sp)
; RV64I-NEXT: addi sp, sp, 80
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i16_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB108_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a2, .LBB108_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB108_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB108_3: # in Loop: Header=BB108_1 Depth=1
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB108_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i16* %a, i16 %b acq_rel
ret i16 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i16_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: lui a2, 16
+; RV64IA-NEXT: addiw a2, a2, -1
+; RV64IA-NEXT: and a1, a1, a2
+; RV64IA-NEXT: andi a3, a0, 3
+; RV64IA-NEXT: slli a3, a3, 3
+; RV64IA-NEXT: sllw a6, a2, a3
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
+; RV64IA-NEXT: .LBB109_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a4, (a0)
+; RV64IA-NEXT: and a2, a4, a6
+; RV64IA-NEXT: mv a5, a4
+; RV64IA-NEXT: bgeu a1, a2, .LBB109_3
+; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB109_1 Depth=1
+; RV64IA-NEXT: xor a5, a4, a1
+; RV64IA-NEXT: and a5, a5, a6
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: .LBB109_3: # in Loop: Header=BB109_1 Depth=1
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB109_1
+; RV64IA-NEXT: # %bb.4:
+; RV64IA-NEXT: srlw a0, a4, a3
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i16* %a, i16 %b seq_cst
ret i16 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB130_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB130_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB131_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.w a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB131_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB132_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB132_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB133_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aq a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB133_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB134_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.w.aqrl a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB134_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b monotonic
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b acquire
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b release
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b acq_rel
ret i32 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b seq_cst
ret i32 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB185_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB185_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB186_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aq a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB186_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB187_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB187_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB188_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aq a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB188_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB189_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aqrl a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d.aqrl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB189_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b seq_cst
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b monotonic
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b acquire
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b release
ret i64 %1
}
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b acq_rel
ret i64 %1
}
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b seq_cst
ret i64 %1
}