// TODO, Obvious Missing Transforms:
// * Narrow width by halfs excluding zero/undef lanes
-static Value *simplifyMaskedLoad(const IntrinsicInst &II,
-                                 InstCombiner::BuilderTy &Builder) {
+Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
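
(Aside, not part of the patch: a rough sketch of the simplification this helper performs and why the explicit BuilderTy parameter is no longer needed once the routine is a member. The maskIsAllOneOrUndef helper name and the exact CreateAlignedLoad overload are assumptions based on the surrounding code, not verbatim from this change.)

Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();

  // An all-ones (or undef) mask makes the intrinsic an ordinary vector load,
  // created through the member Builder instead of a passed-in BuilderTy.
  if (maskIsAllOneOrUndef(II.getArgOperand(2)))
    return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                     "unmaskedload");

  // Other cases are left to the existing code.
  return nullptr;
}
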
// * Narrow width by halfs excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar load
// * Vector incrementing address -> vector masked load
-static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
+Instruction *InstCombiner::simplifyMaskedGather(IntrinsicInst &II) {
  return nullptr;
}
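
(Aside, not part of the patch: the "vector splat address w/known mask -> scalar load" TODO bullet above would look roughly like the following once gather simplification is filled in. getSplatValue, maskIsAllOneOrUndef, and the IRBuilder overloads used here are assumptions for illustration, not code introduced by this change.)

Instruction *InstCombiner::simplifyMaskedGather(IntrinsicInst &II) {
  // If every lane reads the same address and every lane is enabled, a single
  // scalar load followed by a splat produces the same result as the gather.
  if (auto *SplatPtr = getSplatValue(II.getArgOperand(0)))
    if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
      unsigned Alignment =
          cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
      Value *Scalar = Builder.CreateAlignedLoad(
          II.getType()->getVectorElementType(), SplatPtr, Alignment,
          "gather.scalar");
      Value *Splat =
          Builder.CreateVectorSplat(II.getType()->getVectorNumElements(),
                                    Scalar);
      return replaceInstUsesWith(II, Splat);
    }
  return nullptr;
}
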
    break;
  }
  case Intrinsic::masked_load:
-    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, Builder))
+    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
-    return simplifyMaskedGather(*II, *this);
+    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);
+  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
+  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);
  /// Transform (zext icmp) to bitwise / integer operations in order to