if (CmpInst::isIntPredicate(Pred))
MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
else if (Pred == CmpInst::FCMP_FALSE)
- MIRBuilder.buildConstant(Res, 0);
- else if (Pred == CmpInst::FCMP_TRUE)
- MIRBuilder.buildConstant(Res, 1);
+ MIRBuilder.buildCopy(
+ Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
+ else if (Pred == CmpInst::FCMP_TRUE)
+ MIRBuilder.buildCopy(
+ Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
else
MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
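Both the FCMP_FALSE and FCMP_TRUE cases now copy from whatever vreg getOrCreateVReg hands back for the IR constant, instead of building a fresh G_CONSTANT at each use site, so repeated uses of the same constant can share one definition. A minimal, self-contained sketch of that caching idea follows; the types and names are toy stand-ins, not LLVM's actual API.

#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Toy model of "one vreg per constant": the first request materializes the
// constant into the entry block and caches the resulting vreg; later requests
// return the cached vreg instead of emitting another definition.
struct ToyEntryBlock {
  std::vector<std::string> Insts;
};

class ToyConstantCache {
  std::map<int64_t, unsigned> ValToVReg; // keyed on the constant's value
  unsigned NextVReg = 0;
  ToyEntryBlock &Entry;

public:
  explicit ToyConstantCache(ToyEntryBlock &E) : Entry(E) {}

  unsigned getOrCreateVReg(int64_t C) {
    auto It = ValToVReg.find(C);
    if (It != ValToVReg.end())
      return It->second; // already materialized once, reuse it
    unsigned Reg = NextVReg++;
    Entry.Insts.push_back("%" + std::to_string(Reg) + " = G_CONSTANT " +
                          std::to_string(C));
    ValToVReg.emplace(C, Reg);
    return Reg;
  }
};

Asking this toy cache for the same value twice returns the same register, which matches the CHECK updates further below: the G_CONSTANT definitions move ahead of the surrounding instructions, but their count does not change.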
Value &Op0 = *U.getOperand(0);
unsigned BaseReg = getOrCreateVReg(Op0);
- LLT PtrTy = getLLTForType(*Op0.getType(), *DL);
- unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
- LLT OffsetTy = LLT::scalar(PtrSize);
+ Type *PtrIRTy = Op0.getType();
+ LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+ Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
+ LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
int64_t Offset = 0;
for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
     GTI != E; ++GTI) {
if (Offset != 0) {
unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
- unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
- MIRBuilder.buildConstant(OffsetReg, Offset);
+ unsigned OffsetReg =
+ getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
BaseReg = NewBaseReg;
}
// N = N + Idx * ElementSize;
- unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
- MIRBuilder.buildConstant(ElementSizeReg, ElementSize);
+ unsigned ElementSizeReg =
+ getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));
unsigned IdxReg = getOrCreateVReg(*Idx);
if (MRI->getType(IdxReg) != OffsetTy) {
  // GEP indices are sign-extended (or truncated) to the offset type's width.
  unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
  MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
  IdxReg = NewIdxReg;
}
if (Offset != 0) {
- unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
- MIRBuilder.buildConstant(OffsetReg, Offset);
+ unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
return true;
}
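The GEP path now derives its offset type from DL->getIntPtrType(PtrIRTy) and pulls the offset and element-size constants through getOrCreateVReg, while still folding constant contributions into the running Offset before emitting a G_GEP. Below is a simplified sketch of that folding with hypothetical ToyGEPIndex entries; it is a toy that collects all constant contributions into one offset, whereas the code above flushes the running offset whenever it reaches a dynamic index.

#include <cstdint>
#include <vector>

// Toy stand-in for one GEP index: either a compile-time constant or a dynamic
// value, plus the allocation size in bytes of the element it steps over.
struct ToyGEPIndex {
  bool IsConstant;
  int64_t Value;       // index value, only meaningful when IsConstant
  int64_t ElementSize; // bytes per element at this level
};

// Fold every constant index into a single byte offset (one G_CONSTANT plus one
// G_GEP in the translator); dynamic indices are returned for a G_MUL + G_GEP each.
int64_t foldConstantOffset(const std::vector<ToyGEPIndex> &Indices,
                           std::vector<ToyGEPIndex> &DynamicOut) {
  int64_t Offset = 0;
  for (const ToyGEPIndex &Idx : Indices) {
    if (Idx.IsConstant)
      Offset += Idx.Value * Idx.ElementSize;
    else
      DynamicOut.push_back(Idx);
  }
  return Offset;
}

The first GEP test below shows the resulting shape: one G_CONSTANT/G_GEP pair for the constant part (272) and a G_MUL/G_GEP pair for the dynamic index scaled by its element size (4).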
.addUse(getOrCreateVReg(*CI.getOperand(1)));
if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
- unsigned Zero = MRI->createGenericVirtualRegister(s1);
- EntryBuilder.buildConstant(Zero, 0);
+ unsigned Zero = getOrCreateVReg(
+ *Constant::getNullValue(Type::getInt1Ty(CI.getContext())));
MIB.addUse(Zero);
}
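For context, the "extended" opcodes G_UADDE and G_USUBE take a carry-in operand, so translating the plain add/sub overflow intrinsics here means feeding them a constant-false carry; the change above only moves where that i1 zero comes from (the cached constant vreg instead of a freshly created one). A toy 32-bit model of the carry-in semantics, with a hypothetical helper name:

#include <cassert>
#include <cstdint>

struct ToyAddCarry {
  uint32_t Sum;
  bool CarryOut;
};

// Rough 32-bit model of G_UADDE: Sum = A + B + CarryIn, CarryOut set on
// unsigned wrap. A plain uadd.with.overflow is the CarryIn == false case.
ToyAddCarry toyUADDE(uint32_t A, uint32_t B, bool CarryIn) {
  uint64_t Wide = uint64_t(A) + uint64_t(B) + (CarryIn ? 1 : 0);
  return {static_cast<uint32_t>(Wide), Wide > UINT32_MAX};
}

int main() {
  assert(toyUADDE(0xFFFFFFFFu, 1, false).CarryOut); // overflow detected
  assert(toyUADDE(0xFFFFFFFFu, 0, true).Sum == 0);  // carry-in wraps around
  assert(!toyUADDE(1, 2, false).CarryOut);          // no overflow
  return 0;
}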
unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
- LLT IntPtrTy = LLT::scalar(DL->getPointerSizeInBits());
+ Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
+ LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
if (MRI->getType(NumElts) != IntPtrTy) {
unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
NumElts = ExtElts;
}
unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
- unsigned TySize = MRI->createGenericVirtualRegister(IntPtrTy);
- MIRBuilder.buildConstant(TySize, -DL->getTypeAllocSize(Ty));
+ unsigned TySize =
+ getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
MIRBuilder.buildMul(AllocSize, NumElts, TySize);
LLT PtrTy = getLLTForType(*AI.getType(), *DL);
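The dynamic-alloca path now takes its size type from DL->getIntPtrType(AI.getType()) and fetches the negated element size through the constant cache. Negating the size means the G_MUL result is already a downward byte delta, so it can be added to (a copy of) the stack pointer with a plain G_GEP, which is the shape the alloca tests below check. A toy model of that arithmetic, with hypothetical names and ignoring anything beyond the visible G_MUL/G_GEP pair:

#include <cassert>
#include <cstdint>

// Toy model of the G_MUL / G_GEP pair above: multiplying the element count by
// the negated allocation size gives a negative byte delta, so adding it to the
// stack pointer moves the stack down by NumElts * AllocSize bytes.
uint64_t toyDynamicAlloca(uint64_t SP, uint64_t NumElts, uint64_t AllocSize) {
  int64_t NumBytes =
      static_cast<int64_t>(NumElts) * -static_cast<int64_t>(AllocSize);
  return SP + static_cast<uint64_t>(NumBytes); // models G_GEP %sp, NumBytes
}

int main() {
  // e.g. four 16-byte elements from a stack pointer at 0x1000:
  assert(toyDynamicAlloca(0x1000, 4, 16) == 0x1000 - 64);
  return 0;
}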
; CHECK-LABEL: name: test_simple_alloca
; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
-; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -1
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
; CHECK-LABEL: name: test_aligned_alloca
; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
-; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -1
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
; CHECK-LABEL: name: test_natural_alloca
; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
-; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -16
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_CONSTANT i64 272
-; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT i64 4
+; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[BASE2:%[0-9]+]](p0) = G_GEP [[BASE1]], [[OFFSET2]](s64)
; CHECK: [[RES:%[0-9]+]](p0) = COPY [[BASE2]](p0)
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT i64 64
+; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_CONSTANT i64 40
; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
-; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_CONSTANT i64 40
; CHECK: [[BASE2:%[0-9]+]](p0) = G_GEP [[BASE1]], [[OFFSET2]](s64)
; CHECK: %x0 = COPY [[BASE2]](p0)