if (CASExpected) {
  AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
  AllocaCASExpected->setAlignment(AllocaAlignment);
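+  // The alloca may live in a non-default address space (on AMDGPU, allocas
+  // are in addrspace(5)), so the temporary i8 pointer type must be created
+  // in the alloca's own address space for the bitcast below to be valid.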
+  unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();
+
  AllocaCASExpected_i8 =
-     Builder.CreateBitCast(AllocaCASExpected, Type::getInt8PtrTy(Ctx));
+     Builder.CreateBitCast(AllocaCASExpected,
+                           Type::getInt8PtrTy(Ctx, AllocaAS));
  Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
  Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
  Args.push_back(AllocaCASExpected_i8);
if (!CASExpected && HasResult && !UseSizedLibcall) {
  AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
  AllocaResult->setAlignment(AllocaAlignment);
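+  // As above, build the i8 pointer type in the alloca's address space so the
+  // cast stays legal when allocas are not in address space 0.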
+  unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
  AllocaResult_i8 =
-     Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx));
+     Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
  Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
  Args.push_back(AllocaResult_i8);
}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+
+; FIXME: This should not introduce a libcall, much less one to an
+; anonymous function.
+
+define i32 @atomic_load_global_align1(i32 addrspace(1)* %ptr) {
+; GCN-LABEL: @atomic_load_global_align1(
+; GCN-NEXT: [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
+; GCN-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(1)* [[TMP1]] to i8*
+; GCN-NEXT: [[TMP3:%.*]] = alloca i32, align 4
+; GCN-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP3]] to i8*
+; GCN-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
+; GCN-NEXT: call void @0(i64 4, i8* [[TMP2]], i8* [[TMP4]], i32 5)
+; GCN-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]], align 4
+; GCN-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP4]])
+; GCN-NEXT: ret i32 [[TMP5]]
+;
+ %val = load atomic i32, i32 addrspace(1)* %ptr seq_cst, align 1
+ ret i32 %val
+}
+
+define void @atomic_store_global_align1(i32 addrspace(1)* %ptr, i32 %val) {
+; GCN-LABEL: @atomic_store_global_align1(
+; GCN-NEXT: [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
+; GCN-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(1)* [[TMP1]] to i8*
+; GCN-NEXT: [[TMP3:%.*]] = alloca i32, align 4
+; GCN-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP3]] to i8*
+; GCN-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
+; GCN-NEXT: store i32 [[VAL:%.*]], i32* [[TMP3]], align 4
+; GCN-NEXT: call void @1(i64 4, i8* [[TMP2]], i8* [[TMP4]], i32 0)
+; GCN-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP4]])
+; GCN-NEXT: ret void
+;
+ store atomic i32 %val, i32 addrspace(1)* %ptr monotonic, align 1
+ ret void
+}