// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
- llvm::Value *MemSet = CGF.CGM.getIntrinsic(llvm::Intrinsic::memset_i64);
+ llvm::Value *MemSet = CGF.CGM.getIntrinsic(llvm::Intrinsic::memset);
uint64_t Size = CGF.getContext().getTypeSize(T);
const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
/// getMemCpyFn - Return the declaration of the llvm.memcpy intrinsic,
/// creating and caching it on first use.  The intrinsic is overloaded on
/// its length-argument type, so the old per-pointer-width dispatch
/// (memcpy_i16/i32/i64) is no longer needed.
llvm::Function *CodeGenModule::getMemCpyFn() {
  // Lazily created; reuse the cached declaration on subsequent calls.
  if (MemCpyFn) return MemCpyFn;
  return MemCpyFn = getIntrinsic(llvm::Intrinsic::memcpy);
}
/// getMemMoveFn - Return the declaration of the llvm.memmove intrinsic,
/// creating and caching it on first use.  The intrinsic is overloaded on
/// its length-argument type, so the old per-pointer-width dispatch
/// (memmove_i16/i32/i64) is no longer needed.
llvm::Function *CodeGenModule::getMemMoveFn() {
  // Lazily created; reuse the cached declaration on subsequent calls.
  if (MemMoveFn) return MemMoveFn;
  return MemMoveFn = getIntrinsic(llvm::Intrinsic::memmove);
}
/// getMemSetFn - Return the declaration of the llvm.memset intrinsic,
/// creating and caching it on first use.  The intrinsic is overloaded on
/// its length-argument type, so the old per-pointer-width dispatch
/// (memset_i16/i32/i64) is no longer needed.
llvm::Function *CodeGenModule::getMemSetFn() {
  // Lazily created; reuse the cached declaration on subsequent calls.
  if (MemSetFn) return MemSetFn;
  return MemSetFn = getIntrinsic(llvm::Intrinsic::memset);
}
static void appendFieldAndPadding(CodeGenModule &CGM,