return LV;
}
- // We need to compute the bit offset for the bit-field, the offset is to the
- // byte. Note, there is a subtle invariant here: we can only call this routine
- // on non-synthesized ivars but we may be called for synthesized ivars.
- // However, a synthesized ivar can never be a bit-field, so this is safe.
- uint64_t BitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar) % 8;
+ // We need to compute an access strategy for this bit-field. We are given the
+ // offset to the first byte in the bit-field, the sub-byte offset is taken
+ // from the original layout. We reuse the normal bit-field access strategy by
+ // treating this as an access to a struct where the bit-field is in byte 0,
+ // and adjust the containing type size as appropriate.
+ //
+ // FIXME: Note that currently we make a very conservative estimate of the
+ // alignment of the bit-field, because (a) it is not clear what guarantees the
+ // runtime makes us, and (b) we don't have a way to specify that the struct is
+ // at an alignment plus offset.
+ //
+ // Note, there is a subtle invariant here: we can only call this routine on
+ // non-synthesized ivars but we may be called for synthesized ivars. However,
+ // a synthesized ivar can never be a bit-field, so this is safe.
+ const ASTRecordLayout &RL =
+ CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
+ uint64_t TypeSizeInBits = RL.getSize();
+ uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
+ uint64_t BitOffset = FieldBitOffset % 8;
+ uint64_t ContainingTypeAlign = 8;
+ uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
uint64_t BitFieldSize =
Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
// layout object. However, this is blocked on other cleanups to the
// Objective-C code, so for now we just live with allocating a bunch of these
// objects.
+ CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
+ CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
+ ContainingTypeSize, ContainingTypeAlign));
- // We always construct a single, possibly unaligned, access for this case.
- CGBitFieldInfo::AccessInfo AI;
- AI.FieldIndex = 0;
- AI.FieldByteOffset = 0;
- AI.FieldBitStart = BitOffset;
- AI.AccessWidth = CGF.CGM.getContext().getTypeSize(IvarTy);
- AI.AccessAlignment = 0;
- AI.TargetBitOffset = 0;
- AI.TargetBitWidth = BitFieldSize;
-
- CGBitFieldInfo *Info =
- new (CGF.CGM.getContext()) CGBitFieldInfo(BitFieldSize, 1, &AI,
- IvarTy->isSignedIntegerType());
-
- // FIXME: We need to set a very conservative alignment on this, or make sure
- // that the runtime is doing the right thing.
return LValue::MakeBitfield(V, *Info,
IvarTy.getCVRQualifiers() | CVRQualifiers);
}
--- /dev/null
+// RUN: %clang_cc1 -triple i386-apple-darwin10 -emit-llvm -o %t %s
+// RUN: FileCheck -check-prefix=CHECK-I386 < %t %s
+// RUN: %clang_cc1 -triple armv6-apple-darwin10 -target-abi apcs-gnu -emit-llvm -o %t %s
+// RUN: FileCheck -check-prefix=CHECK-ARM < %t %s
+
+@interface I0 {
+@public
+ unsigned x:15;
+ unsigned y: 1;
+}
+@end
+
+// Check that we don't try to use an i32 load here, which would reach beyond the
+// end of the structure.
+//
+// CHECK-I386: define i32 @f0(
+// CHECK-I386: [[t0_0:%.*]] = load i16* {{.*}}, align 1
+// CHECK-I386: lshr i16 [[t0_0]], 7
+// CHECK-I386: }
+int f0(I0 *a) {
+ return a->y;
+}
+
+// Check that we can handle straddled loads.
+//
+// CHECK-ARM: define i32 @f1(
+// CHECK-ARM: [[t1_ptr:%.*]] = getelementptr
+// CHECK-ARM: [[t1_base:%.*]] = bitcast i8* [[t1_ptr]] to i32*
+// CHECK-ARM: [[t1_0:%.*]] = load i32* [[t1_base]], align 1
+// CHECK-ARM: lshr i32 [[t1_0]], 1
+// CHECK-ARM: [[t1_base_2_cast:%.*]] = bitcast i32* [[t1_base]] to i8*
+// CHECK-ARM: [[t1_base_2:%.*]] = getelementptr i8* [[t1_base_2_cast]]
+// CHECK-ARM: [[t1_1:%.*]] = load i8* [[t1_base_2]], align 1
+// CHECK-ARM: and i8 [[t1_1]], 1
+// CHECK-ARM: }
+@interface I1 {
+@public
+ unsigned x: 1;
+ unsigned y:32;
+}
+@end
+
+int f1(I1 *a) { return a->y; }