From: Simon Pilgrim
Date: Sun, 16 Jul 2017 18:37:23 +0000 (+0000)
Subject: Strip trailing whitespace. NFCI
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=02f26df5636dae6d583710fbaa07f41d338a935a;p=llvm

Strip trailing whitespace. NFCI

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@308143 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index cc5c09cbf0e..a7a52eb2783 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -1759,29 +1759,29 @@ let Predicates = Preds in {
                              (i64 0)),
            (COPY_TO_REGCLASS (!cast(InstrStr##rr) _.RC:$src1, _.RC:$src2),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (OpNode (_.VT _.RC:$src1),
+                             (_.KVT (OpNode (_.VT _.RC:$src1),
                                             (_.VT (bitconvert (_.LdFrag addr:$src2))))),
                              (i64 0)),
            (COPY_TO_REGCLASS (!cast(InstrStr##rm) _.RC:$src1, addr:$src2),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (and _.KRCWM:$mask,
+                             (_.KVT (and _.KRCWM:$mask,
                                          (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))),
                              (i64 0)),
            (COPY_TO_REGCLASS (!cast(InstrStr##rrk) _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (and (_.KVT _.KRCWM:$mask),
-                                         (_.KVT (OpNode (_.VT _.RC:$src1),
-                                                        (_.VT (bitconvert
+                             (_.KVT (and (_.KVT _.KRCWM:$mask),
+                                         (_.KVT (OpNode (_.VT _.RC:$src1),
+                                                        (_.VT (bitconvert
                                                                (_.LdFrag addr:$src2))))))),
                              (i64 0)),
-           (COPY_TO_REGCLASS (!cast(InstrStr##rmk) _.KRCWM:$mask,
+           (COPY_TO_REGCLASS (!cast(InstrStr##rmk) _.KRCWM:$mask,
                                                    _.RC:$src1, addr:$src2),
                              NewInf.KRC)>;
 }
@@ -1798,7 +1798,7 @@ let Predicates = Preds in {
                              (i64 0)),
            (COPY_TO_REGCLASS (!cast(InstrStr##rmb) _.RC:$src1, addr:$src2),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
                              (_.KVT (and (_.KVT _.KRCWM:$mask),
                                          (_.KVT (OpNode (_.VT _.RC:$src1),
@@ -1879,7 +1879,7 @@ defm : avx512_icmp_packed_rmb_lowering;
-defm : avx512_icmp_packed_rmb_lowering;
 defm : avx512_icmp_packed_rmb_lowering;
@@ -2127,17 +2127,17 @@ multiclass avx512_icmp_cc_packed_lowering
                                          Preds> {
 let Predicates = Preds in {
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (OpNode (_.VT _.RC:$src1),
-                                            (_.VT _.RC:$src2),
+                             (_.KVT (OpNode (_.VT _.RC:$src1),
+                                            (_.VT _.RC:$src2),
                                             imm:$cc)),
                              (i64 0)),
-           (COPY_TO_REGCLASS (!cast(InstrStr##rri) _.RC:$src1,
+           (COPY_TO_REGCLASS (!cast(InstrStr##rri) _.RC:$src1,
                                                    _.RC:$src2,
                                                    imm:$cc),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (OpNode (_.VT _.RC:$src1),
+                             (_.KVT (OpNode (_.VT _.RC:$src1),
                                             (_.VT (bitconvert (_.LdFrag addr:$src2))),
                                             imm:$cc)),
                              (i64 0)),
@@ -2145,37 +2145,37 @@ let Predicates = Preds in {
                                                    addr:$src2,
                                                    imm:$cc),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (and _.KRCWM:$mask,
+                             (_.KVT (and _.KRCWM:$mask,
                                          (OpNode (_.VT _.RC:$src1),
                                                  (_.VT _.RC:$src2),
                                                  imm:$cc))),
                              (i64 0)),
            (COPY_TO_REGCLASS (!cast(InstrStr##rrik) _.KRCWM:$mask,
-                                                    _.RC:$src1,
+                                                    _.RC:$src1,
                                                     _.RC:$src2,
                                                     imm:$cc),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (and (_.KVT _.KRCWM:$mask),
-                                         (_.KVT (OpNode (_.VT _.RC:$src1),
-                                                        (_.VT (bitconvert
+                             (_.KVT (and (_.KVT _.KRCWM:$mask),
+                                         (_.KVT (OpNode (_.VT _.RC:$src1),
+                                                        (_.VT (bitconvert
                                                                (_.LdFrag addr:$src2))),
                                                         imm:$cc)))),
                              (i64 0)),
-           (COPY_TO_REGCLASS (!cast(InstrStr##rmik) _.KRCWM:$mask,
+           (COPY_TO_REGCLASS (!cast(InstrStr##rmik) _.KRCWM:$mask,
                                                     _.RC:$src1, addr:$src2,
                                                     imm:$cc),
                             NewInf.KRC)>;
 }
 }
-
+
 multiclass avx512_icmp_cc_packed_rmb_lowering
                                               Preds>
+                                              list Preds>
  : avx512_icmp_cc_packed_lowering<_, NewInf, OpNode, InstrStr, Preds> {
 let Predicates = Preds in {
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
@@ -2187,7 +2187,7 @@ let Predicates = Preds in {
                                                    addr:$src2,
                                                    imm:$cc),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
                              (_.KVT (and (_.KVT _.KRCWM:$mask),
                                          (_.KVT (OpNode (_.VT _.RC:$src1),
@@ -2447,17 +2447,17 @@ multiclass avx512_fcmp_cc_packed_lowering
                                          Preds> {
 let Predicates = Preds in {
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (X86cmpm (_.VT _.RC:$src1),
-                                             (_.VT _.RC:$src2),
+                             (_.KVT (X86cmpm (_.VT _.RC:$src1),
+                                             (_.VT _.RC:$src2),
                                              imm:$cc)),
                              (i64 0)),
-           (COPY_TO_REGCLASS (!cast(InstrStr##rri) _.RC:$src1,
+           (COPY_TO_REGCLASS (!cast(InstrStr##rri) _.RC:$src1,
                                                    _.RC:$src2,
                                                    imm:$cc),
                              NewInf.KRC)>;
-
+
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (X86cmpm (_.VT _.RC:$src1),
+                             (_.KVT (X86cmpm (_.VT _.RC:$src1),
                                              (_.VT (bitconvert (_.LdFrag addr:$src2))),
                                              imm:$cc)),
                              (i64 0)),
@@ -2477,19 +2477,19 @@ let Predicates = Preds in {
                              NewInf.KRC)>;
 }
 }
-
+
 multiclass avx512_fcmp_cc_packed_sae_lowering
                                               Preds>
+                                              string InstrStr, list Preds>
  : avx512_fcmp_cc_packed_lowering<_, NewInf, InstrStr, Preds> {
 let Predicates = Preds in
  def : Pat<(insert_subvector (NewInf.KVT immAllZerosV),
-                             (_.KVT (X86cmpmRnd (_.VT _.RC:$src1),
-                                                (_.VT _.RC:$src2),
+                             (_.KVT (X86cmpmRnd (_.VT _.RC:$src1),
+                                                (_.VT _.RC:$src2),
                                                 imm:$cc,
                                                 (i32 FROUND_NO_EXC))),
                              (i64 0)),
-           (COPY_TO_REGCLASS (!cast(InstrStr##rrib) _.RC:$src1,
+           (COPY_TO_REGCLASS (!cast(InstrStr##rrib) _.RC:$src1,
                                                     _.RC:$src2,
                                                     imm:$cc),
                              NewInf.KRC)>;
@@ -2817,16 +2817,16 @@ let Predicates = [HasAVX512] in {
  def : Pat<(maskVT (scalar_to_vector GR32:$src)),
            (COPY_TO_REGCLASS GR32:$src, maskRC)>;
-  def : Pat<(i32 (X86Vextract maskRC:$src, (iPTR 0))),
+  def : Pat<(i32 (X86Vextract maskRC:$src, (iPTR 0))),
            (COPY_TO_REGCLASS maskRC:$src, GR32)>;
  def : Pat<(maskVT (scalar_to_vector GR8:$src)),
            (COPY_TO_REGCLASS (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, sub_8bit), maskRC)>;
-  def : Pat<(i8 (X86Vextract maskRC:$src, (iPTR 0))),
+  def : Pat<(i8 (X86Vextract maskRC:$src, (iPTR 0))),
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS maskRC:$src, GR32)), sub_8bit)>;
-  def : Pat<(i32 (anyext (i8 (X86Vextract maskRC:$src, (iPTR 0))))),
+  def : Pat<(i32 (anyext (i8 (X86Vextract maskRC:$src, (iPTR 0))))),
            (COPY_TO_REGCLASS maskRC:$src, GR32)>;
 }
@@ -3036,7 +3036,7 @@ def : Pat<(v8i1 (OpNode (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
                            (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
                            (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm))),
            VK8)>;
-def : Pat<(insert_subvector (v16i1 immAllZerosV),
+def : Pat<(insert_subvector (v16i1 immAllZerosV),
           (v8i1 (OpNode (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))), (i64 0)),
          (KSHIFTRWri (KSHIFTLWri (!cast(InstStr##Zrr)
@@ -3044,8 +3044,8 @@ def : Pat<(insert_subvector (v16i1 immAllZerosV),
                                  (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm))),
                                  (i8 8)), (i8 8))>;
-def : Pat<(insert_subvector (v16i1 immAllZerosV),
-          (v8i1 (and VK8:$mask,
+def : Pat<(insert_subvector (v16i1 immAllZerosV),
+          (v8i1 (and VK8:$mask,
           (OpNode (v8i32 VR256X:$src1), (v8i32 VR256X:$src2)))), (i64 0)),
          (KSHIFTRWri (KSHIFTLWri (!cast(InstStr##Zrrk)
@@ -3063,7 +3063,7 @@ def : Pat<(v8i1 (OpNode (_.info256.VT VR256X:$src1), (_.info256.VT VR256X:$src2)
                            (_.info512.VT (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
                            imm:$cc), VK8)>;
-def : Pat<(insert_subvector (v16i1 immAllZerosV),
+def : Pat<(insert_subvector (v16i1 immAllZerosV),
           (v8i1 (OpNode (_.info256.VT VR256X:$src1), (_.info256.VT VR256X:$src2), imm:$cc)), (i64 0)),
          (KSHIFTRWri (KSHIFTLWri (!cast(InstStr##Zrri)
@@ -3072,8 +3072,8 @@ def : Pat<(insert_subvector (v16i1 immAllZerosV),
                                  imm:$cc), (i8 8)), (i8 8))>;
-def : Pat<(insert_subvector (v16i1 immAllZerosV),
-          (v8i1 (and VK8:$mask,
+def : Pat<(insert_subvector (v16i1 immAllZerosV),
+          (v8i1 (and VK8:$mask,
           (OpNode (_.info256.VT VR256X:$src1), (_.info256.VT VR256X:$src2), imm:$cc))), (i64 0)),
          (KSHIFTRWri (KSHIFTLWri (!cast(InstStr##Zrrik)
@@ -3379,35 +3379,35 @@ defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512,
 defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
                                        HasAVX512>,
                  avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info,
-                                        HasAVX512, "VMOVDQA32">,
+                                        HasAVX512, "VMOVDQA32">,
                  PD, EVEX_CD8<32, CD8VF>;
 defm VMOVDQA64 : avx512_alignedload_vl<0x6F, "vmovdqa64", avx512vl_i64_info,
                                        HasAVX512>,
                  avx512_alignedstore_vl<0x7F, "vmovdqa64", avx512vl_i64_info,
-                                        HasAVX512, "VMOVDQA64">,
+                                        HasAVX512, "VMOVDQA64">,
                  PD, VEX_W, EVEX_CD8<64, CD8VF>;
 defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", avx512vl_i8_info, HasBWI>,
                 avx512_store_vl<0x7F, "vmovdqu8", avx512vl_i8_info,
-                                HasBWI, "VMOVDQU8">,
+                                HasBWI, "VMOVDQU8">,
                 XD, EVEX_CD8<8, CD8VF>;
 defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI>,
                  avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info,
-                                 HasBWI, "VMOVDQU16">,
+                                 HasBWI, "VMOVDQU16">,
                  XD, VEX_W, EVEX_CD8<16, CD8VF>;
 defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
                                 null_frag>,
                  avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info,
-                                 HasAVX512, "VMOVDQU32">,
+                                 HasAVX512, "VMOVDQU32">,
                  XS, EVEX_CD8<32, CD8VF>;
 defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
                                 null_frag>,
                  avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info,
-                                 HasAVX512, "VMOVDQU64">,
+                                 HasAVX512, "VMOVDQU64">,
                  XS, VEX_W, EVEX_CD8<64, CD8VF>;
 // Special instructions to help with spilling when we don't have VLX. We need
@@ -3964,49 +3964,49 @@ def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
            (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
 let hasSideEffects = 0 in {
-  def VMOVSSZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
+  def VMOVSSZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
                      (ins VR128X:$src1, FR32X:$src2),
                      "vmovss.s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [], NoItinerary>, XS, EVEX_4V, VEX_LIG,
                      FoldGenData<"VMOVSSZrr">;
 let Constraints = "$src0 = $dst" in
-  def VMOVSSZrrk_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
-                      (ins f32x_info.RC:$src0, f32x_info.KRCWM:$mask,
+  def VMOVSSZrrk_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
+                      (ins f32x_info.RC:$src0, f32x_info.KRCWM:$mask,
                                                VR128X:$src1, FR32X:$src2),
                      "vmovss.s\t{$src2, $src1, $dst {${mask}}|"#
                                "$dst {${mask}}, $src1, $src2}",
                      [], NoItinerary>, EVEX_K, XS, EVEX_4V, VEX_LIG,
                      FoldGenData<"VMOVSSZrrk">;
-
-  def VMOVSSZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
+
+  def VMOVSSZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
                       (ins f32x_info.KRCWM:$mask, VR128X:$src1, FR32X:$src2),
                       "vmovss.s\t{$src2, $src1, $dst {${mask}} {z}|"#
                                 "$dst {${mask}} {z}, $src1, $src2}",
                       [], NoItinerary>, EVEX_KZ, XS, EVEX_4V, VEX_LIG,
                       FoldGenData<"VMOVSSZrrkz">;
-  def VMOVSDZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
+  def VMOVSDZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
                      (ins VR128X:$src1, FR64X:$src2),
                      "vmovsd.s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [], NoItinerary>, XD, EVEX_4V, VEX_LIG, VEX_W,
                      FoldGenData<"VMOVSDZrr">;
 let Constraints = "$src0 = $dst" in
-  def VMOVSDZrrk_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
-                      (ins f64x_info.RC:$src0, f64x_info.KRCWM:$mask,
+  def VMOVSDZrrk_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
+                      (ins f64x_info.RC:$src0, f64x_info.KRCWM:$mask,
                                                VR128X:$src1, FR64X:$src2),
                      "vmovsd.s\t{$src2, $src1, $dst {${mask}}|"#
                                "$dst {${mask}}, $src1, $src2}",
                      [], NoItinerary>, EVEX_K, XD, EVEX_4V, VEX_LIG,
-                     VEX_W, FoldGenData<"VMOVSDZrrk">;
+                     VEX_W, FoldGenData<"VMOVSDZrrk">;
-  def VMOVSDZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
-                      (ins f64x_info.KRCWM:$mask, VR128X:$src1,
+  def VMOVSDZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
+                      (ins f64x_info.KRCWM:$mask, VR128X:$src1,
                                                   FR64X:$src2),
                      "vmovsd.s\t{$src2, $src1, $dst {${mask}} {z}|"#
                                "$dst {${mask}} {z}, $src1, $src2}",
-                     [], NoItinerary>, EVEX_KZ, XD, EVEX_4V, VEX_LIG,
+                     [], NoItinerary>, EVEX_KZ, XD, EVEX_4V, VEX_LIG,
                      VEX_W, FoldGenData<"VMOVSDZrrkz">;
}