/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
- return (arch == llvm::Triple::x86 || arch == llvm::Triple::x86_64);
+ // FIXME: Allow unaligned atomic load/store on x86. (It is not
+ // currently supported by the backend.)
+ return 0;
}
/// Return the maximum size that permits atomic accesses for the given
/// architecture.
struct s3 { char c[3]; };
// This structure's size is a power of two, so the property could get native
// atomic accesses.
+// FIXME: But we don't at the moment; the backend doesn't know how to generate
+// correct code.
struct s4 { char c[4]; };
@interface Test0
// CHECK: call void @objc_copyStruct
// CHECK: define internal i32 @"\01-[Test0 s4]"(
-// CHECK: load atomic i32* {{%.*}} unordered, align 1
+// CHECK: call void @objc_copyStruct
// CHECK: define internal void @"\01-[Test0 setS4:]"(
-// CHECK: store atomic i32 {{%.*}}, i32* {{%.*}} unordered, align 1
+// CHECK: call void @objc_copyStruct