Summary: Add lowering pattern for llvm.aarch64.neon.vcvtfxs2fp.f16.i64
Reviewers: pbarrio, DavidSpickett, LukeGeeson
Reviewed By: LukeGeeson
Subscribers: javed.absar, kristof.beyls, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D60259
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@358171 91177308-0d34-0410-b5e6-96231b3b80d8
(SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
// Select signed fixed-point i32 -> f16 conversion: the i32 operand lives in an
// FPR32, but SCVTFh operates on the 16-bit h register, so extract hsub first.
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
(SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
+// Added pattern (D60259): lower llvm.aarch64.neon.vcvtfxs2fp.f16.i64 (signed
+// fixed-point i64 -> f16). The i64 source is in an FPR64; SCVTFh reads only
+// the h subregister, so EXTRACT_SUBREG hsub before converting.
+def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
+ (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
(and FPR32:$Rn, (i32 65535)),
vecshiftR16:$imm)),
%0 = trunc i32 %facg to i16
ret i16 %0
}
+
+; Regression test for the new i64 pattern: vcvtfxs2fp.f16.i64 with shift 16
+; must select fmov (x0 -> d0) followed by scvtf h0, h0, #16 rather than
+; failing instruction selection.
+define dso_local half @vcvth_n_f16_s64_test(i64 %a) {
+; CHECK-LABEL: vcvth_n_f16_s64_test:
+; CHECK: fmov d0, x0
+; CHECK-NEXT: scvtf h0, h0, #16
+; CHECK-NEXT: ret
+entry:
+ %vcvth_n_f16_s64 = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i64(i64 %a, i32 16)
+ ret half %vcvth_n_f16_s64
+}