StoreLoad ? True : False,
StoreStore ? True : False,
Device ? True : False };
- CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
- C, C + 5);
+ CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier), C);
}
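
This hunk sets the pattern for the whole patch: the iterator-pair CreateCall overloads are replaced by a single overload taking ArrayRef<Value *>. A fixed-size C array such as C[5] converts to ArrayRef implicitly, so the hand-written `C, C + 5` bounds disappear along with their off-by-one risk. A minimal standalone sketch of the new call site (emitAllTrueBarrier is a made-up helper, not part of the patch):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/IRBuilder.h"  // 2011-era path; later llvm/IR/IRBuilder.h

    // Call Fn with five constant-true i1 flags, as EmitMemoryBarrier
    // does above when every ordering constraint is requested.
    llvm::CallInst *emitAllTrueBarrier(llvm::IRBuilder<> &Builder,
                                       llvm::Function *Fn) {
      llvm::Value *True = Builder.getTrue();
      llvm::Value *C[5] = { True, True, True, True, True };
      // Old overload: Builder.CreateCall(Fn, C, C + 5);
      // New overload: the array converts implicitly to ArrayRef<Value *>
      // and carries its own length.
      return Builder.CreateCall(Fn, C);
    }
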
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
// The atomic builtins are also full memory barriers. This is a utility for
// wrapping a call to the builtins with memory barriers.
static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
- Value **ArgBegin, Value **ArgEnd) {
+ ArrayRef<Value *> Args) {
// FIXME: We need a target hook for whether this applies to device memory or
// not.
bool Device = true;
// Create barriers both before and after the call.
EmitMemoryBarrier(CGF, true, true, true, true, Device);
- Value *Result = CGF.Builder.CreateCall(Fn, ArgBegin, ArgEnd);
+ Value *Result = CGF.Builder.CreateCall(Fn, Args);
EmitMemoryBarrier(CGF, true, true, true, true, Device);
return Result;
}
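
Switching the parameter from a (begin, end) pair to ArrayRef<Value *> lets every caller hand over its argument storage directly: ArrayRef is a non-owning (pointer, length) view, and both plain C arrays and SmallVectors convert to it implicitly. A small sketch of that conversion behavior (countArgs and demo are illustrative names):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"

    namespace llvm { class Value; }

    // Any contiguous Value* storage binds to an ArrayRef parameter.
    static unsigned countArgs(llvm::ArrayRef<llvm::Value *> Args) {
      return Args.size();
    }

    void demo(llvm::Value *A, llvm::Value *B) {
      llvm::Value *Fixed[2] = { A, B };  // like the Args[2] arrays below
      llvm::SmallVector<llvm::Value *, 4> Vec;
      Vec.push_back(A);
      Vec.push_back(B);
      (void)countArgs(Fixed);  // C array -> ArrayRef, length deduced
      (void)countArgs(Vec);    // SmallVector -> ArrayRef, no copy
    }
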
const llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
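
For context, this hunk lowers the __sync_op_and_fetch family: the llvm.atomic.* intrinsics return the value memory held before the operation, so the post-operation value the builtin must produce is recomputed by the CreateBinOp just above. The two builtin families at the source level (user code, not from the patch):

    // fetch_and_add returns the old value, add_and_fetch the new one;
    // the latter is the intrinsic result with the operand applied again.
    void fetchFlavors(int *p) {
      int oldv = __sync_fetch_and_add(p, 1);  // intrinsic result as-is
      int newv = __sync_add_and_fetch(p, 1);  // intrinsic result + 1
      (void)oldv; (void)newv;
    }
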
Args[1] = EmitToInt(*this, Args[1], T, IntType);
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
- Value *Result = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Value *Result = EmitCallWithBarrier(*this, AtomF, Args);
Result = EmitFromInt(*this, Result, T, ValueType);
return RValue::get(Result);
}
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
Value *OldVal = Args[1];
- Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args);
Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
// zext bool to int.
Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
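
Same idea for the compare-and-swap pair: __sync_bool_compare_and_swap issues the identical val_compare_and_swap call, then derives its boolean by comparing the returned previous value against the expected operand and zero-extending the i1 to the builtin's int return type. Side by side at the source level (user code, not from the patch):

    // User-level view of the two compare-and-swap flavors lowered here.
    void casFlavors(int *p) {
      int prev = __sync_val_compare_and_swap(p, 0, 42);   // old value of *p
      bool ok  = __sync_bool_compare_and_swap(p, 42, 7);  // did it swap?
      (void)prev; (void)ok;
    }
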
EmitScalarExpr(E->getArg(3)),
EmitScalarExpr(E->getArg(4))
};
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C);
return RValue::get(0);
}
Args.push_back(ArgValue);
}
- Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
+ Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
- return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
+ return Builder.CreateCall(F, Ops, name);
}
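
The loop feeding this call (its head elided above) walks the intrinsic's formal parameters and coerces each collected operand to the exact IR type the declaration expects, since overloaded NEON builtins often arrive with generic vector types. A simplified sketch of the shape, with the loop head reconstructed for context:

    // Bitcast each operand to the intrinsic's formal parameter type,
    // then emit one call with the whole operand vector. (The real loop
    // also routes a designated shift operand through
    // EmitNeonShiftVector; its else-side is what the hunk shows.)
    unsigned j = 0;
    for (llvm::Function::const_arg_iterator ai = F->arg_begin(),
                                            ae = F->arg_end();
         ai != ae; ++ai, ++j)
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
    return Builder.CreateCall(F, Ops, name);
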
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
llvm::StringRef Name = FD->getName();
- return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- Ops.begin(), Ops.end());
+ return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
// Call the appropriate intrinsic.
Function *F = CGM.getIntrinsic(Int, Ty);
- return Builder.CreateCall(F, Ops.begin(), Ops.end(), "vcvtr");
+ return Builder.CreateCall(F, Ops, "vcvtr");
}
// Determine the type of this overloaded NEON intrinsic.
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
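
These vldN_lane sites can no longer pass `Ops.begin() + 1, Ops.end()` straight through, so they spell out an ArrayRef view of the tail: slot 0 holds the destination pointer for the final store, and everything after it is the intrinsic's argument list. The view is copy-free, since SmallVector iterators are plain pointers and ArrayRef has a (begin, end) constructor; a sketch (argsAfterDest is a made-up name):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"

    namespace llvm { class Value; }

    // View Ops[1..end) without copying. Requires a non-empty vector.
    llvm::ArrayRef<llvm::Value *>
    argsAfterDest(llvm::SmallVectorImpl<llvm::Value *> &Ops) {
      return llvm::ArrayRef<llvm::Value *>(Ops.begin() + 1, Ops.end());
    }
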
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
Args.push_back(CI);
Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
+ Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// splat lane 0 to all elts in each vector of the result.
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Value *Val = Builder.CreateExtractValue(Ops[1], i);
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
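
The NEON tail above and the X86 cases below share one shape: each case selects an Intrinsic::ID (and sometimes a result name) and breaks, and a common tail materializes the declaration and calls it with the accumulated operands, now passed as a whole. A sketch of that shape, reusing the patch's own names (the commented-out case is illustrative):

    // Common tail of the per-target builtin switches in this file.
    llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
    switch (BuiltinID) {
    default: return 0;  // not a builtin this target lowers specially
    // case X86::BI__builtin_ia32_foo: ID = Intrinsic::x86_foo; break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, name);
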
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
case X86::BI__builtin_ia32_cmpps: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
+ return Builder.CreateCall(F, Ops, "cmpps");
}
case X86::BI__builtin_ia32_cmpss: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
+ return Builder.CreateCall(F, Ops, "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
const llvm::Type *PtrTy = Int8PtrTy;
}
case X86::BI__builtin_ia32_cmppd: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
+ return Builder.CreateCall(F, Ops, "cmppd");
}
case X86::BI__builtin_ia32_cmpsd: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
+ return Builder.CreateCall(F, Ops, "cmpsd");
}
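
All four SSE compare builtins funnel a predicate immediate through as the intrinsic's last operand; the ps/pd forms compare whole vectors, the ss/sd forms only the low element. At the user level, clang's headers expand the familiar wrappers to these builtins with the predicate baked in (predicate 0 is "equal"; allEqual is a made-up name):

    #include <xmmintrin.h>

    // _mm_cmpeq_ps reaches the x86_sse_cmp_ps lowering above with a
    // predicate immediate of 0.
    __m128 allEqual(__m128 a, __m128 b) { return _mm_cmpeq_ps(a, b); }
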
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
break;
}
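
Here Ops still holds the palignr immediate in its last slot, but the psrl intrinsics take only a value and a shift amount, so the calls pass an explicit two-element prefix. That is ArrayRef's (pointer, length) constructor, the counterpart of the (begin, end) form used for the vldN_lane tails; a sketch (firstTwo is a made-up name):

    #include "llvm/ADT/ArrayRef.h"

    namespace llvm { class Value; }

    // Hand an intrinsic just the first two collected operands.
    llvm::ArrayRef<llvm::Value *> firstTwo(llvm::Value **Ops) {
      return llvm::ArrayRef<llvm::Value *>(&Ops[0], 2);
    }
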
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
}
}
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ return Builder.CreateCall(F, Ops, "");
}
// vec_st
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ return Builder.CreateCall(F, Ops, "");
}
}
return 0;