A future change will cause this byte offset to be inttoptr'd and then exported
via an absolute symbol. On the importing end we will expect the symbol to be
in the range [0,2^32) so that it will fit into a 32-bit relocation. The
problem is that on 64-bit architectures, if the offset is negative, it will
not be in the correct range once we inttoptr it.

This change causes us to use a 32-bit integer so that it can be inttoptr'd
(which zero-extends) into the correct range.
Differential Revision: https://reviews.llvm.org/D30016
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@295487 91177308-0d34-0410-b5e6-96231b3b80d8
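To make the zero-extension point concrete, here is a minimal LLVM IR sketch
(hypothetical globals with a made-up offset of -5; not part of this patch) of
how inttoptr treats the two integer widths on a 64-bit target:

; inttoptr zero extends an integer narrower than the pointer type, so a
; negative 32-bit offset becomes 0x00000000FFFFFFFB, which lies in [0,2^32)
; and fits a 32-bit relocation once exported via an absolute symbol.
@off32 = global i8* inttoptr (i32 -5 to i8*)
; With i64, the same offset stays 0xFFFFFFFFFFFFFFFB, outside that range.
@off64 = global i8* inttoptr (i64 -5 to i8*)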
Target.WasDevirt = true;
// Rewrite each call to a load from OffsetByte/OffsetBit.
- Constant *ByteConst = ConstantInt::get(Int64Ty, OffsetByte);
+ Constant *ByteConst = ConstantInt::get(Int32Ty, OffsetByte);
Constant *BitConst = ConstantInt::get(Int8Ty, 1ULL << OffsetBit);
applyVirtualConstProp(CSByConstantArg.second,
TargetsForSlot[0].Fn->getName(), ByteConst, BitConst);
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 0
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i64 -5
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i32 -5
; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, i8* [[VTGEP1]]
; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 2
; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 1
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i64 -5
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i32 -5
; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, i8* [[VTGEP2]]
; CHECK: [[VTAND2:%[^ ]*]] = and i8 [[VTLOAD2]], 1
; CHECK: [[VTCMP2:%[^ ]*]] = icmp ne i8 [[VTAND2]], 0
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 2
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i32 (i8*)*
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i64 -4
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i32 -4
; CHECK: [[VTBC3:%[^ ]*]] = bitcast i8* [[VTGEP3]] to i32*
; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, i32* [[VTBC3]]
%result = call i32 %fptr_casted(i8* %obj)
%pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 0, metadata !"typeid")
%fptr = extractvalue {i8*, i1} %pair, 0
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i64 -5
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i32 -5
; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, i8* [[VTGEP1]]
; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 2
; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
%pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 8, metadata !"typeid")
%fptr = extractvalue {i8*, i1} %pair, 0
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i64 -5
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i32 -5
; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, i8* [[VTGEP2]]
; CHECK: [[VTAND2:%[^ ]*]] = and i8 [[VTLOAD2]], 1
; CHECK: [[VTCMP2:%[^ ]*]] = icmp ne i8 [[VTAND2]], 0
%pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 16, metadata !"typeid")
%fptr = extractvalue {i8*, i1} %pair, 0
%fptr_casted = bitcast i8* %fptr to i32 (i8*)*
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i64 -4
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i32 -4
; CHECK: [[VTBC3:%[^ ]*]] = bitcast i8* [[VTGEP3]] to i32*
; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, i32* [[VTBC3]]
%result = call i32 %fptr_casted(i8* %obj)
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 0
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i64 28
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i32 28
; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, i8* [[VTGEP1]]
; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 2
; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 1
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i1 (i8*)*
- ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i64 28
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i32 28
; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, i8* [[VTGEP2]]
; CHECK: [[VTAND2:%[^ ]*]] = and i8 [[VTLOAD2]], 1
; CHECK: [[VTCMP2:%[^ ]*]] = icmp ne i8 [[VTAND2]], 0
%fptrptr = getelementptr [3 x i8*], [3 x i8*]* %vtable, i32 0, i32 2
%fptr = load i8*, i8** %fptrptr
%fptr_casted = bitcast i8* %fptr to i32 (i8*)*
- ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i64 24
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i32 24
; CHECK: [[VTBC3:%[^ ]*]] = bitcast i8* [[VTGEP3]] to i32*
; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, i32* [[VTBC3]]
%result = call i32 %fptr_casted(i8* %obj)