def : ReadAdvance<ReadAfterLd, 4>;
// (a folded load is an instruction that loads and does some operation)
-// Ex: ADDPD xmm,[mem]-> This instruction has two micro-ops
+// Ex: ADDPD xmm,[mem]-> This instruction has two micro-ops
// Instructions with folded loads are usually micro-fused, so they only appear
// as two micro-ops.
// a. load and
// b. the operation (addpd in this example).
// Register variant takes 1 cycle on the execution port.
def : WriteRes<SchedRW, [ExePort]> { let Latency = Lat; }
- // Memory variant also uses a cycle on ZnAGU
+ // Memory variant also uses a cycle on ZnAGU
// and adds 4 cycles to the latency.
def : WriteRes<SchedRW.Folded, [ZnAGU, ExePort]> {
+ let NumMicroOps = 2;
let Latency = !add(Lat, 4);
}
}
}
}
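// Worked example of the scheme above (the Lat value is chosen only for
// illustration): with Lat = 3, the register form reports Latency = 3 on the
// execution port, while the folded-load form reports
// Latency = !add(3, 4) = 7, occupies both ZnAGU and the execution port, and
// counts as NumMicroOps = 2.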
-// WriteRMW is set for instructions with Memory write
+// WriteRMW is set for instructions with a memory write
// operation in codegen.
def : WriteRes<WriteRMW, [ZnAGU]>;
def : WriteRes<WritePCmpIStrI, []>;
def : WriteRes<WritePCmpIStrILd, []>;
}
+
+//=== Regex based itineraries ===//
+// Notation:
+// - r: register.
+// - m: memory.
+// - i: immediate.
+// - mm: 64-bit mmx register.
+// - x: 128-bit xmm register.
+// - (x)mm: mmx or xmm register.
+// - y: 256-bit ymm register.
+// - v: any vector register.
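+//
+// For example, an entry commented "r16,m" below covers a 16-bit register
+// destination with a memory source (the MOV16rm pattern), and "m,r/i"
+// covers a memory destination with a register or immediate source.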
+
+//=== Integer Instructions ===//
+//-- Move instructions --//
+// MOV.
+// r16,m.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "MOV16rm")>;
+
+// MOVSX, MOVZX.
+// r,m.
+def : InstRW<[WriteLoad], (instregex "MOV(S|Z)X32rm(8|16)")>;
+
+// CMOVcc.
+// r,r.
+def : InstRW<[WriteALU],
+ (instregex "CMOV(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)(16|32|64)rr")>;
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd],
+ (instregex "CMOV(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)(16|32|64)rm")>;
+
+// XCHG.
+// r,r.
+def ZnWriteXCHG : SchedWriteRes<[ZnALU]> {
+ let NumMicroOps = 2;
+ let ResourceCycles = [2];
+}
+
+def : InstRW<[ZnWriteXCHG], (instregex "XCHG(8|16|32|64)rr", "XCHG(16|32|64)ar")>;
+
+// r,m.
+def ZnWriteXCHGrm : SchedWriteRes<[ZnAGU, ZnALU]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteXCHGrm, ReadAfterLd], (instregex "XCHG(8|16|32|64)rm")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "XLAT")>;
+
+// POP16.
+// r.
+def ZnWritePop16r : SchedWriteRes<[ZnAGU]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWritePop16r], (instregex "POP16rmm")>;
+def : InstRW<[WriteMicrocoded], (instregex "POPF(16|32)")>;
+def : InstRW<[WriteMicrocoded], (instregex "POPA(16|32)")>;
+
+// PUSH.
+// r. Has default values.
+// m.
+def ZnWritePUSH : SchedWriteRes<[ZnAGU]> {
+ let Latency = 4;
+}
+def : InstRW<[ZnWritePUSH], (instregex "PUSH(16|32)rmm")>;
+
+// PUSHF.
+def : InstRW<[WriteMicrocoded], (instregex "PUSHF(16|32)")>;
+
+// PUSHA.
+def ZnWritePushA : SchedWriteRes<[ZnAGU]> {
+ let Latency = 8;
+}
+def : InstRW<[ZnWritePushA], (instregex "PUSHA(16|32)")>;
+
+// LAHF.
+def : InstRW<[WriteMicrocoded], (instregex "LAHF")>;
+
+// SAHF.
+def ZnWriteSAHF : SchedWriteRes<[ZnALU]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteSAHF], (instregex "SAHF")>;
+
+// BSWAP.
+def ZnWriteBSwap : SchedWriteRes<[ZnALU]> {
+ let ResourceCycles = [4];
+}
+def : InstRW<[ZnWriteBSwap], (instregex "BSWAP")>;
+
+// MOVBE.
+// r,m.
+def ZnWriteMOVBE : SchedWriteRes<[ZnAGU, ZnALU]> {
+ let Latency = 5;
+}
+def : InstRW<[ZnWriteMOVBE, ReadAfterLd], (instregex "MOVBE(16|32|64)rm")>;
+
+// m,r.
+def : InstRW<[ZnWriteMOVBE], (instregex "MOVBE(16|32|64)mr")>;
+
+//-- Arithmetic instructions --//
+
+// ADD SUB.
+// m,r/i.
+def : InstRW<[WriteALULd], (instregex "(ADD|SUB)(8|16|32|64)m(r|i)",
+ "(ADD|SUB)(8|16|32|64)mi8",
+ "(ADD|SUB)64mi32")>;
+
+// ADC SBB.
+// r,r/i.
+def : InstRW<[WriteALU], (instregex "(ADC|SBB)(8|16|32|64)r(r|i)",
+ "(ADC|SBB)(16|32|64)ri8",
+ "(ADC|SBB)64ri32",
+ "(ADC|SBB)(8|16|32|64)rr_REV")>;
+
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd],
+ (instregex "(ADC|SBB)(8|16|32|64)rm")>;
+
+// m,r/i.
+def : InstRW<[WriteALULd],
+ (instregex "(ADC|SBB)(8|16|32|64)m(r|i)",
+ "(ADC|SBB)(16|32|64)mi8",
+ "(ADC|SBB)64mi32")>;
+
+// INC DEC NOT NEG.
+// m.
+def : InstRW<[WriteALULd],
+ (instregex "(INC|DEC|NOT|NEG)(8|16|32|64)m",
+ "(INC|DEC)64(16|32)m")>;
+
+// MUL IMUL.
+// r16.
+def ZnWriteMul16 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+ let Latency = 3;
+}
+def : InstRW<[ZnWriteMul16], (instregex "IMUL16r", "MUL16r")>;
+
+// m16.
+def ZnWriteMul16Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+ let Latency = 8;
+}
+def : InstRW<[ZnWriteMul16Ld, ReadAfterLd], (instregex "IMUL16m", "MUL16m")>;
+
+// r32.
+def ZnWriteMul32 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+ let Latency = 3;
+}
+def : InstRW<[ZnWriteMul32], (instregex "IMUL32r", "MUL32r")>;
+
+// m32.
+def ZnWriteMul32Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+ let Latency = 8;
+}
+def : InstRW<[ZnWriteMul32Ld, ReadAfterLd], (instregex "IMUL32m", "MUL32m")>;
+
+// r64.
+def ZnWriteMul64 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+ let Latency = 4;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMul64], (instregex "IMUL64r", "MUL64r")>;
+
+// m64.
+def ZnWriteMul64Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+ let Latency = 9;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMul64Ld, ReadAfterLd], (instregex "IMUL64m", "MUL64m")>;
+
+// r16,r16.
+def ZnWriteMul16rri : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+ let Latency = 3;
+}
+def : InstRW<[ZnWriteMul16rri], (instregex "IMUL16rri", "IMUL16rri8")>;
+
+// r16,m16.
+def ZnWriteMul16rmi : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+ let Latency = 8;
+}
+def : InstRW<[ZnWriteMul16rmi, ReadAfterLd], (instregex "IMUL16rmi", "IMUL16rmi8")>;
+
+// MULX.
+// r32,r32,r32.
+def ZnWriteMulX32 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+ let Latency = 3;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteMulX32], (instregex "MULX32rr")>;
+
+// r32,r32,m32.
+def ZnWriteMulX32Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+ let Latency = 8;
+ let ResourceCycles = [1, 2, 2];
+}
+def : InstRW<[ZnWriteMulX32Ld, ReadAfterLd], (instregex "MULX32rm")>;
+
+// r64,r64,r64.
+def ZnWriteMulX64 : SchedWriteRes<[ZnALU1]> {
+ let Latency = 3;
+}
+def : InstRW<[ZnWriteMulX64], (instregex "MULX64rr")>;
+
+// r64,r64,m64.
+def ZnWriteMulX64Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+ let Latency = 8;
+}
+def : InstRW<[ZnWriteMulX64Ld, ReadAfterLd], (instregex "MULX64rm")>;
+
+// DIV, IDIV.
+// r8.
+def ZnWriteDiv8 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+ let Latency = 15;
+}
+def : InstRW<[ZnWriteDiv8], (instregex "DIV8r", "IDIV8r")>;
+
+// r16.
+def ZnWriteDiv16 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+ let Latency = 17;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv16], (instregex "DIV16r", "IDIV16r")>;
+
+// r32.
+def ZnWriteDiv32 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+ let Latency = 25;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv32], (instregex "DIV32r", "IDIV32r")>;
+
+// r64.
+def ZnWriteDiv64 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+ let Latency = 41;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv64], (instregex "DIV64r", "IDIV64r")>;
+
+//-- Control transfer instructions --//
+
+// J(E|R)CXZ.
+def ZnWriteJCXZ : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteJCXZ], (instregex "JCXZ", "JECXZ_(32|64)", "JRCXZ")>;
+
+// INTO
+def : InstRW<[WriteMicrocoded], (instregex "INTO")>;
+
+// LOOP.
+def ZnWriteLOOP : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteLOOP], (instregex "LOOP")>;
+
+// LOOP(N)E, LOOP(N)Z
+def ZnWriteLOOPE : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteLOOPE], (instregex "LOOPE", "LOOPNE",
+ "LOOPZ", "LOOPNZ")>;
+
+// CALL.
+// r.
+def ZnWriteCALLr : SchedWriteRes<[ZnAGU, ZnALU03]>;
+def : InstRW<[ZnWriteCALLr], (instregex "CALL(16|32)r")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "CALL(16|32)m")>;
+
+// RET.
+def ZnWriteRET : SchedWriteRes<[ZnALU03]> {
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteRET], (instregex "RET(L|Q|W)", "LRET(L|Q|W)",
+ "IRET(D|Q)", "RETF")>;
+
+//-- Logic instructions --//
+
+// AND OR XOR.
+// m,r/i.
+def : InstRW<[WriteALULd],
+ (instregex "(AND|OR|XOR)(8|16|32|64)m(r|i)",
+ "(AND|OR|XOR)(8|16|32|64)mi8", "(AND|OR|XOR)64mi32")>;
+
+// ANDN.
+// r,r.
+def : InstRW<[WriteALU], (instregex "ANDN(32|64)rr")>;
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "ANDN(32|64)rm")>;
+
+// Define ALU latency variants
+def ZnWriteALULat2 : SchedWriteRes<[ZnALU]> {
+ let Latency = 2;
+}
+def ZnWriteALULat2Ld : SchedWriteRes<[ZnAGU, ZnALU]> {
+ let Latency = 6;
+}
+
+def ZnWriteALULat3 : SchedWriteRes<[ZnALU]> {
+ let Latency = 3;
+}
+def ZnWriteALULat3Ld : SchedWriteRes<[ZnAGU, ZnALU]> {
+ let Latency = 7;
+}
+
+// BSF BSR.
+// r,r.
+def : InstRW<[ZnWriteALULat3], (instregex "BS(R|F)(16|32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat3Ld, ReadAfterLd], (instregex "BS(R|F)(16|32|64)rm")>;
+
+// BT.
+// r,r/i.
+def : InstRW<[WriteShift], (instregex "BT(16|32|64)r(r|i8)")>;
+
+def : InstRW<[WriteShiftLd], (instregex "BT(16|32|64)mr")>;
+def : InstRW<[WriteShiftLd], (instregex "BT(16|32|64)mi8")>;
+
+// BTR BTS BTC.
+// r,r,i.
+def ZnWriteBTRSC : SchedWriteRes<[ZnALU]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteBTRSC], (instregex "BT(R|S|C)(16|32|64)r(r|i8)")>;
+
+// m,r,i.
+def ZnWriteBTRSCm : SchedWriteRes<[ZnAGU, ZnALU]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteBTRSCm], (instregex "BT(R|S|C)(16|32|64)m(r|i8)")>;
+
+// BLSI BLSMSK BLSR.
+// r,r.
+def : InstRW<[ZnWriteALULat2], (instregex "BLS(I|MSK|R)(32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat2Ld, ReadAfterLd], (instregex "BLS(I|MSK|R)(32|64)rm")>;
+
+// BEXTR.
+// r,r,r.
+def : InstRW<[WriteALU], (instregex "BEXTR(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "BEXTR(32|64)rm")>;
+
+// BZHI.
+// r,r,r.
+def : InstRW<[WriteALU], (instregex "BZHI(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "BZHI(32|64)rm")>;
+
+// CLD STD.
+def : InstRW<[WriteALU], (instregex "STD", "CLD")>;
+
+// PDEP PEXT.
+// r,r,r.
+def : InstRW<[WriteMicrocoded], (instregex "PDEP(32|64)rr", "PEXT(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteMicrocoded], (instregex "PDEP(32|64)rm", "PEXT(32|64)rm")>;
+
+// ROR ROL.
+def : InstRW<[WriteShift], (instregex "RO(R|L)(8|16|32|64)r1")>;
+
+// RCR RCL.
+// r,1.
+def : InstRW<[WriteShift], (instregex "RC(R|L)(8|16|32|64)r1")>;
+
+// m,1.
+def : InstRW<[WriteMicrocoded], (instregex "RC(R|L)(8|16|32|64)m1")>;
+
+// r,i / r,CL.
+def : InstRW<[WriteShift], (instregex "RC(R|L)(8|16|32|64)r(i|CL)")>;
+
+// m,i / m,CL.
+def : InstRW<[WriteMicrocoded], (instregex "RC(R|L)(8|16|32|64)m(i|CL)")>;
+
+// SHR SHL SAR.
+// m,i / m,1.
+def : InstRW<[WriteShiftLd], (instregex "S(A|H)(R|L)(8|16|32|64)m(i|1)")>;
+
+// SHRD SHLD.
+// r,r,i.
+def : InstRW<[WriteShift], (instregex "SH(R|L)D(16|32|64)rri8")>;
+
+// m,r,i.
+def : InstRW<[WriteShiftLd], (instregex "SH(R|L)D(16|32|64)mri8")>;
+
+// r,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SHLD(16|32|64)rrCL")>;
+
+// r,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SHRD(16|32|64)rrCL")>;
+
+// m,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SH(R|L)D(16|32|64)mrCL")>;
+
+// SETcc.
+// r.
+def : InstRW<[WriteShift],
+ (instregex "SET(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)r")>;
+// m.
+def : InstRW<[WriteShift],
+ (instregex "SET(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)m")>;
+
+// LZCNT TZCNT.
+// r,r.
+def : InstRW<[ZnWriteALULat2], (instregex "(LZCNT|TZCNT)(16|32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat2Ld, ReadAfterLd], (instregex "(LZCNT|TZCNT)(16|32|64)rm")>;
+
+//-- Misc instructions --//
+// CMPXCHG.
+def ZnWriteCMPXCHG : SchedWriteRes<[ZnAGU, ZnALU]> {
+ let Latency = 8;
+ let NumMicroOps = 5;
+}
+def : InstRW<[ZnWriteCMPXCHG], (instregex "CMPXCHG(8|16|32|64)rm")>;
+
+// CMPXCHG8B.
+def ZnWriteCMPXCHG8B : SchedWriteRes<[ZnAGU, ZnALU]> {
+ let NumMicroOps = 18;
+}
+def : InstRW<[ZnWriteCMPXCHG8B], (instregex "CMPXCHG8B")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "CMPXCHG16B")>;
+
+// LEAVE
+def ZnWriteLEAVE : SchedWriteRes<[ZnALU, ZnAGU]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteLEAVE], (instregex "LEAVE")>;
+
+// PAUSE.
+def : InstRW<[WriteMicrocoded], (instregex "PAUSE")>;
+
+// RDTSC.
+def : InstRW<[WriteMicrocoded], (instregex "RDTSC")>;
+
+// RDPMC.
+def : InstRW<[WriteMicrocoded], (instregex "RDPMC")>;
+
+// RDRAND.
+def : InstRW<[WriteMicrocoded], (instregex "RDRAND(16|32|64)r")>;
+
+// XGETBV.
+def : InstRW<[WriteMicrocoded], (instregex "XGETBV")>;
+
+//-- String instructions --//
+// CMPS.
+def : InstRW<[WriteMicrocoded], (instregex "CMPS(B|L|Q|W)")>;
+
+// LODSB/W.
+def : InstRW<[WriteMicrocoded], (instregex "LODS(B|W)")>;
+
+// LODSD/Q.
+def : InstRW<[WriteMicrocoded], (instregex "LODS(L|Q)")>;
+
+// MOVS.
+def : InstRW<[WriteMicrocoded], (instregex "MOVS(B|L|Q|W)")>;
+
+// SCAS.
+def : InstRW<[WriteMicrocoded], (instregex "SCAS(B|W|L|Q)")>;
+
+// STOS
+def : InstRW<[WriteMicrocoded], (instregex "STOS(B|L|Q|W)")>;
+
+// XADD.
+def : InstRW<[WriteMicrocoded], (instregex "XADD(8|16|32|64)rm")>;
+
+//=== Floating Point x87 Instructions ===//
+//-- Move instructions --//
+
+def ZnWriteFLDr : SchedWriteRes<[ZnFPU13]> ;
+
+def ZnWriteSTr: SchedWriteRes<[ZnFPU23]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+
+// LD_F.
+// r.
+def : InstRW<[ZnWriteFLDr], (instregex "LD_Frr")>;
+
+// m.
+def ZnWriteLD_F80m : SchedWriteRes<[ZnAGU, ZnFPU13]> {
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteLD_F80m], (instregex "LD_F80m")>;
+
+// FBLD.
+def : InstRW<[WriteMicrocoded], (instregex "FBLDm")>;
+
+// FST(P).
+// r.
+def : InstRW<[ZnWriteSTr], (instregex "ST_(F|FP)rr")>;
+
+// m80.
+def ZnWriteST_FP80m : SchedWriteRes<[ZnAGU, ZnFPU23]> {
+ let Latency = 5;
+}
+def : InstRW<[ZnWriteST_FP80m], (instregex "ST_FP80m")>;
+
+// FBSTP.
+// m80.
+def : InstRW<[WriteMicrocoded], (instregex "FBSTPm")>;
+
+def ZnWriteFXCH : SchedWriteRes<[ZnFPU]>;
+
+// FXCH.
+def : InstRW<[ZnWriteFXCH], (instregex "XCH_F")>;
+
+// FILD.
+def ZnWriteFILD : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 11;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteFILD], (instregex "ILD_F(16|32|64)m")>;
+
+// FIST(P) FISTTP.
+def ZnWriteFIST : SchedWriteRes<[ZnAGU, ZnFPU23]> {
+ let Latency = 12;
+}
+def : InstRW<[ZnWriteFIST], (instregex "IST_(F|FP)(16|32)m")>;
+
+def ZnWriteFPU13 : SchedWriteRes<[ZnAGU, ZnFPU13]> {
+ let Latency = 8;
+}
+
+def ZnWriteFPU3 : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 11;
+}
+
+// FLDZ.
+def : InstRW<[ZnWriteFPU13], (instregex "LD_F0")>;
+
+// FLD1.
+def : InstRW<[ZnWriteFPU3], (instregex "LD_F1")>;
+
+// FLDPI FLDL2E etc.
+def : InstRW<[ZnWriteFPU3], (instregex "FLDPI", "FLDL2(T|E)", "FLDL(G|N)2")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "CMOV(B|BE|P|NB|NBE|NE|NP)_F")>;
+
+// FNSTSW.
+// AX.
+def : InstRW<[WriteMicrocoded], (instregex "FNSTSW16r")>;
+
+// m16.
+def : InstRW<[WriteMicrocoded], (instregex "FNSTSWm")>;
+
+// FLDCW.
+def : InstRW<[WriteMicrocoded], (instregex "FLDCW16m")>;
+
+// FNSTCW.
+def : InstRW<[WriteMicrocoded], (instregex "FNSTCW16m")>;
+
+// FINCSTP FDECSTP.
+def : InstRW<[ZnWriteFPU3], (instregex "FINCSTP", "FDECSTP")>;
+
+// FFREE.
+def : InstRW<[ZnWriteFPU3], (instregex "FFREE")>;
+
+// FNSAVE.
+def : InstRW<[WriteMicrocoded], (instregex "FSAVEm")>;
+
+// FRSTOR.
+def : InstRW<[WriteMicrocoded], (instregex "FRSTORm")>;
+
+//-- Arithmetic instructions --//
+
+def ZnWriteFPU3Lat2 : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 2;
+}
+
+def ZnWriteFPU3Lat2Ld : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 9;
+}
+
+def ZnWriteFPU3Lat1 : SchedWriteRes<[ZnFPU3]> ;
+
+def ZnWriteFPU0Lat1 : SchedWriteRes<[ZnFPU0]> ;
+
+def ZnWriteFPU0Lat1Ld : SchedWriteRes<[ZnAGU, ZnFPU0]> {
+ let Latency = 8;
+}
+
+// FABS.
+def : InstRW<[ZnWriteFPU3Lat2], (instregex "ABS_F")>;
+
+// FCHS.
+def : InstRW<[ZnWriteFPU3Lat1], (instregex "CHS_F")>;
+
+// FCOM(P) FUCOM(P).
+// r.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "COM_FST0r", "COMP_FST0r", "UCOM_Fr",
+ "UCOM_FPr")>;
+// m.
+def : InstRW<[ZnWriteFPU0Lat1Ld], (instregex "FCOM(32|64)m", "FCOMP(32|64)m")>;
+
+// FCOMPP FUCOMPP.
+// r.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "FCOMPP", "UCOM_FPPr")>;
+
+def ZnWriteFPU02 : SchedWriteRes<[ZnAGU, ZnFPU02]> {
+  let Latency = 9;
+}
+
+// FCOMI(P) FUCOMI(P).
+// r.
+def : InstRW<[ZnWriteFPU02], (instregex "COM_FIr", "COM_FIPr", "UCOM_FIr",
+ "UCOM_FIPr")>;
+
+def ZnWriteFPU03 : SchedWriteRes<[ZnAGU, ZnFPU03]> {
+  let Latency = 12;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1,3];
+}
+
+// FICOM(P).
+def : InstRW<[ZnWriteFPU03], (instregex "FICOM(16|32)m", "FICOMP(16|32)m")>;
+
+// FTST.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "TST_F")>;
+
+// FXAM.
+def : InstRW<[ZnWriteFPU3Lat1], (instregex "FXAM")>;
+
+// FPREM.
+def : InstRW<[WriteMicrocoded], (instregex "FPREM")>;
+
+// FPREM1.
+def : InstRW<[WriteMicrocoded], (instregex "FPREM1")>;
+
+// FRNDINT.
+def : InstRW<[WriteMicrocoded], (instregex "FRNDINT")>;
+
+// FSCALE.
+def : InstRW<[WriteMicrocoded], (instregex "FSCALE")>;
+
+// FXTRACT.
+def : InstRW<[WriteMicrocoded], (instregex "FXTRACT")>;
+
+// FNOP.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "FNOP")>;
+
+// WAIT.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "WAIT")>;
+
+// FNCLEX.
+def : InstRW<[WriteMicrocoded], (instregex "FNCLEX")>;
+
+// FNINIT.
+def : InstRW<[WriteMicrocoded], (instregex "FNINIT")>;
+
+//=== Integer MMX and XMM Instructions ===//
+//-- Move instructions --//
+
+// Moves from GPR to FPR incur a penalty.
+def ZnWriteFPU2 : SchedWriteRes<[ZnFPU2]> {
+ let Latency = 3;
+}
+
+// Moves to the ALU don't incur a penalty.
+def ZnWriteToALU2 : SchedWriteRes<[ZnFPU2]> {
+ let Latency = 2;
+}
+
+def ZnWriteFPU : SchedWriteRes<[ZnFPU]>;
+def ZnWriteFPUY : SchedWriteRes<[ZnFPU]> {
+  let NumMicroOps = 2;
+  let Latency = 2;
+}
+
+// MOVD.
+// r32/64 <- (x)mm.
+def : InstRW<[ZnWriteToALU2], (instregex "MMX_MOVD64grr", "MMX_MOVD64from64rr",
+ "VMOVPDI2DIrr", "MOVPDI2DIrr")>;
+
+// (x)mm <- r32/64.
+def : InstRW<[ZnWriteFPU2], (instregex "MMX_MOVD64rr", "MMX_MOVD64to64rr",
+ "VMOVDI2PDIrr", "MOVDI2PDIrr")>;
+
+// MOVQ.
+// r64 <- (x)mm.
+def : InstRW<[ZnWriteToALU2], (instregex "VMOVPQIto64rr")>;
+
+// (x)mm <- r64.
+def : InstRW<[ZnWriteFPU2], (instregex "VMOV64toPQIrr", "VMOVZQI2PQIrr")>;
+
+// (x)mm <- (x)mm.
+def : InstRW<[ZnWriteFPU], (instregex "MMX_MOVQ64rr")>;
+
+// (V)MOVDQA/U.
+// x <- x.
+def : InstRW<[ZnWriteFPU], (instregex "MOVDQ(A|U)rr", "VMOVDQ(A|U)rr",
+ "MOVDQ(A|U)rr_REV", "VMOVDQ(A|U)rr_REV")>;
+
+// y <- y.
+def : InstRW<[ZnWriteFPUY], (instregex "VMOVDQ(A|U)Yrr", "VMOVDQ(A|U)Yrr_REV")>;
+
+// MOVDQ2Q.
+def : InstRW<[ZnWriteFPU], (instregex "MMX_MOVDQ2Qrr")>;
+
+// MOVQ2DQ.
+def : InstRW<[ZnWriteFPU], (instregex "MMX_MOVQ2DQrr")>;
+
+// PACKSSWB/DW.
+// mm <- mm.
+def ZnWriteFPU12 : SchedWriteRes<[ZnFPU12]> ;
+def ZnWriteFPU12Y : SchedWriteRes<[ZnFPU12]> {
+ let NumMicroOps = 2;
+}
+def ZnWriteFPU12m : SchedWriteRes<[ZnAGU, ZnFPU12]> ;
+
+def : InstRW<[ZnWriteFPU12], (instregex "MMX_PACKSSDWirr",
+ "MMX_PACKSSWBirr", "MMX_PACKUSWBirr")>;
+def : InstRW<[ZnWriteFPU12m], (instregex "MMX_PACKSSDWirm",
+ "MMX_PACKSSWBirm", "MMX_PACKUSWBirm")>;
+
+// VPMOVSX/ZX BW BD BQ DW DQ.
+// y <- x.
+def : InstRW<[ZnWriteFPU12Y], (instregex "VPMOV(SX|ZX)(BW|BQ|DW|DQ)Yrr")>;
+
+def ZnWriteFPU013 : SchedWriteRes<[ZnFPU013]> ;
+def ZnWriteFPU013Y : SchedWriteRes<[ZnFPU013]> {
+ let Latency = 2;
+}
+def ZnWriteFPU013m : SchedWriteRes<[ZnAGU, ZnFPU013]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+def ZnWriteFPU013Ld : SchedWriteRes<[ZnAGU, ZnFPU013]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+def ZnWriteFPU013LdY : SchedWriteRes<[ZnAGU, ZnFPU013]> {
+ let Latency = 9;
+ let NumMicroOps = 2;
+}
+
+// PBLENDW.
+// x,x,i / v,v,v,i
+def : InstRW<[ZnWriteFPU013], (instregex "(V?)PBLENDWrri")>;
+// ymm
+def : InstRW<[ZnWriteFPU013Y], (instregex "(V?)PBLENDWYrri")>;
+
+// x,m,i / v,v,m,i
+def : InstRW<[ZnWriteFPU013Ld], (instregex "(V?)PBLENDWrmi")>;
+// y,m,i
+def : InstRW<[ZnWriteFPU013LdY], (instregex "(V?)PBLENDWYrmi")>;
+
+def ZnWriteFPU01 : SchedWriteRes<[ZnFPU01]> ;
+def ZnWriteFPU01Y : SchedWriteRes<[ZnFPU01]> {
+ let NumMicroOps = 2;
+}
+
+// VPBLENDD.
+// v,v,v,i.
+def : InstRW<[ZnWriteFPU01], (instregex "VPBLENDDrri")>;
+// ymm
+def : InstRW<[ZnWriteFPU01Y], (instregex "VPBLENDDYrri")>;
+
+// v,v,m,i
+def ZnWriteFPU01Op2 : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let NumMicroOps = 2;
+ let Latency = 8;
+ let ResourceCycles = [1, 2];
+}
+def ZnWriteFPU01Op2Y : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let NumMicroOps = 2;
+ let Latency = 9;
+ let ResourceCycles = [1, 3];
+}
+def : InstRW<[ZnWriteFPU01Op2], (instregex "VPBLENDDrmi")>;
+def : InstRW<[ZnWriteFPU01Op2Y], (instregex "VPBLENDDYrmi")>;
+
+// MASKMOVQ.
+def : InstRW<[WriteMicrocoded], (instregex "MMX_MASKMOVQ(64)?")>;
+
+// MASKMOVDQU.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)MASKMOVDQU(64)?")>;
+
+// VPMASKMOVQ.
+// ymm
+def : InstRW<[ZnWriteFPU01Op2],(instregex "VPMASKMOVQrm")>;
+def : InstRW<[ZnWriteFPU01Op2Y],(instregex "VPMASKMOVQYrm")>;
+
+def : InstRW<[WriteMicrocoded],
+ (instregex "VPMASKMOVD(Y?)rm")>;
+// m, v,v.
+def : InstRW<[WriteMicrocoded], (instregex "VPMASKMOV(D|Q)(Y?)mr")>;
+
+// PMOVMSKB.
+def ZnWritePMOVMSKB : SchedWriteRes<[ZnFPU2]> {
+ let NumMicroOps = 2;
+}
+def ZnWritePMOVMSKBY : SchedWriteRes<[ZnFPU2]> {
+ let Latency = 2;
+}
+def : InstRW<[ZnWritePMOVMSKB], (instregex "(V|MMX_)?PMOVMSKBrr")>;
+def : InstRW<[ZnWritePMOVMSKBY], (instregex "(V|MMX_)?PMOVMSKBYrr")>;
+
+// PEXTR B/W/D/Q.
+// r32,x,i.
+def ZnWritePEXTRr : SchedWriteRes<[ZnFPU12, ZnFPU2]> {
+ let Latency = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWritePEXTRr], (instregex "PEXTR(B|W|D|Q)rr", "MMX_PEXTRWirri")>;
+
+def ZnWritePEXTRm : SchedWriteRes<[ZnAGU, ZnFPU12, ZnFPU2]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2, 3];
+}
+// m8,x,i.
+def : InstRW<[ZnWritePEXTRm], (instregex "PEXTR(B|W|D|Q)mr")>;
+
+// VPBROADCAST B/W.
+// x, m8/16.
+def ZnWriteVPBROADCAST128Ld : SchedWriteRes<[ZnAGU, ZnFPU12]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteVPBROADCAST128Ld],
+ (instregex "VPBROADCAST(B|W)rm")>;
+
+// y, m8/16
+def ZnWriteVPBROADCAST256Ld : SchedWriteRes<[ZnAGU, ZnFPU1]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteVPBROADCAST256Ld],
+ (instregex "VPBROADCAST(B|W)Yrm")>;
+
+// VPGATHER.
+def : InstRW<[WriteMicrocoded], (instregex "VPGATHER(Q|D)(Q|D)(Y?)rm")>;
+
+//-- Arithmetic instructions --//
+
+// PHADD|PHSUB (S) W/D.
+def : InstRW<[WriteMicrocoded], (instregex "MMX_PHADD(W?)r(r|m)64",
+ "MMX_PHADDSWr(r|m)64",
+ "MMX_PHSUB(W|D)r(r|m)64",
+ "MMX_PHSUBSWrr64",
+ "(V?)PH(ADD|SUB)(W|D)(Y?)r(r|m)",
+ "(V?)PH(ADD|SUB)SWr(r|m)(256)?")>;
+
+// PCMPGTQ.
+def ZnWritePCMPGTQr : SchedWriteRes<[ZnFPU03]>;
+def : InstRW<[ZnWritePCMPGTQr], (instregex "(V?)PCMPGTQ(Y?)rr")>;
+
+// x <- x,m.
+def ZnWritePCMPGTQm : SchedWriteRes<[ZnAGU, ZnFPU03]> {
+ let Latency = 8;
+}
+// ymm.
+def ZnWritePCMPGTQYm : SchedWriteRes<[ZnAGU, ZnFPU03]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,2];
+}
+def : InstRW<[ZnWritePCMPGTQm], (instregex "(V?)PCMPGTQrm")>;
+def : InstRW<[ZnWritePCMPGTQYm], (instregex "(V?)PCMPGTQYrm")>;
+
+// PMULLD.
+// x,x.
+def ZnWritePMULLDr : SchedWriteRes<[ZnFPU0]> {
+ let Latency = 4;
+}
+// ymm.
+def ZnWritePMULLDYr : SchedWriteRes<[ZnFPU0]> {
+ let Latency = 5;
+ let ResourceCycles = [2];
+}
+def : InstRW<[ZnWritePMULLDr], (instregex "(V?)PMULLDrr")>;
+def : InstRW<[ZnWritePMULLDYr], (instregex "(V?)PMULLDYrr")>;
+
+// x,m.
+def ZnWritePMULLDm : SchedWriteRes<[ZnAGU, ZnFPU0]> {
+ let Latency = 11;
+ let NumMicroOps = 2;
+}
+// y,m.
+def ZnWritePMULLDYm : SchedWriteRes<[ZnAGU, ZnFPU0]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWritePMULLDm], (instregex "(V?)PMULLDrm")>;
+def : InstRW<[ZnWritePMULLDYm], (instregex "(V?)PMULLDYrm")>;
+
+//-- Logic instructions --//
+
+// PTEST.
+// v,v.
+def ZnWritePTESTr : SchedWriteRes<[ZnFPU12]> {
+ let ResourceCycles = [2];
+}
+def : InstRW<[ZnWritePTESTr], (instregex "(V?)PTEST(Y?)rr")>;
+
+// v,m.
+def ZnWritePTESTm : SchedWriteRes<[ZnAGU, ZnFPU12]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWritePTESTm], (instregex "(V?)PTEST(Y?)rm")>;
+
+// PSLL,PSRL,PSRA W/D/Q.
+// x,x / v,v,x.
+def ZnWritePShift : SchedWriteRes<[ZnFPU2]> ;
+def ZnWritePShiftY : SchedWriteRes<[ZnFPU2]> {
+ let Latency = 2;
+}
+def ZnWritePShiftLd : SchedWriteRes<[ZnAGU,ZnFPU2]> {
+ let Latency = 8;
+}
+def ZnWritePShiftYLd : SchedWriteRes<[ZnAGU, ZnFPU2]> {
+ let Latency = 9;
+}
+def : InstRW<[ZnWritePShift], (instregex "(V?)PS(LL|RL|RA)(W|D|Q)rr")>;
+def : InstRW<[ZnWritePShiftY], (instregex "(V?)PS(LL|RL|RA)(W|D|Q)Yrr")>;
+
+def : InstRW<[ZnWritePShiftLd], (instregex "(V?)PS(LL|RL|RA)(W|D|Q)rm")>;
+def : InstRW<[ZnWritePShiftYLd], (instregex "(V?)PS(LL|RL|RA)(W|D|Q)Yrm")>;
+
+// PSLL,PSRL DQ.
+def : InstRW<[ZnWritePShift], (instregex "(V?)PS(R|L)LDQri")>;
+def : InstRW<[ZnWritePShiftY], (instregex "(V?)PS(R|L)LDQYri")>;
+
+//=== Floating Point XMM and YMM Instructions ===//
+//-- Move instructions --//
+
+// MOVMSKP S/D.
+// r32 <- x,y.
+def ZnWriteMOVMSKPr : SchedWriteRes<[ZnFPU2]> ;
+def : InstRW<[ZnWriteMOVMSKPr], (instregex "(V?)MOVMSKP(S|D)(Y?)rr")>;
+
+// VPERM2F128.
+def : InstRW<[WriteMicrocoded], (instregex "VPERM2F128rr")>;
+def : InstRW<[WriteMicrocoded], (instregex "VPERM2F128rm")>;
+
+// BLENDVP S/D.
+def ZnWriteFPU01Lat3 : SchedWriteRes<[ZnFPU013]> {
+ let Latency = 3;
+}
+def ZnWriteFPU01Lat3Ld : SchedWriteRes<[ZnAGU, ZnFPU013]> {
+ let Latency = 11;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteFPU01Lat3], (instregex "BLENDVP(S|D)rr0")>;
+def : InstRW<[ZnWriteFPU01Lat3Ld, ReadAfterLd], (instregex "BLENDVP(S|D)rm0")>;
+
+def ZnWriteBROADCAST : SchedWriteRes<[ZnAGU, ZnFPU13]> {
+ let NumMicroOps = 2;
+ let Latency = 8;
+}
+// VBROADCASTF128.
+def : InstRW<[ZnWriteBROADCAST], (instregex "VBROADCASTF128")>;
+
+// EXTRACTPS.
+// r32,x,i.
+def ZnWriteEXTRACTPSr : SchedWriteRes<[ZnFPU12, ZnFPU2]> {
+ let Latency = 2;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteEXTRACTPSr], (instregex "(V?)EXTRACTPSrr")>;
+
+def ZnWriteEXTRACTPSm : SchedWriteRes<[ZnAGU,ZnFPU12, ZnFPU2]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+ let ResourceCycles = [5, 1, 2];
+}
+// m32,x,i.
+def : InstRW<[ZnWriteEXTRACTPSm], (instregex "(V?)EXTRACTPSmr")>;
+
+// VEXTRACTF128.
+// x,y,i.
+def : InstRW<[ZnWriteFPU013], (instregex "VEXTRACTF128rr")>;
+
+// m128,y,i.
+def : InstRW<[ZnWriteFPU013m], (instregex "VEXTRACTF128mr")>;
+
+def ZnWriteVINSERT128r: SchedWriteRes<[ZnFPU013]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
+def ZnWriteVINSERT128Ld: SchedWriteRes<[ZnAGU,ZnFPU013]> {
+ let Latency = 9;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+// VINSERTF128.
+// y,y,x,i.
+def : InstRW<[ZnWriteVINSERT128r], (instregex "VINSERTF128rr")>;
+def : InstRW<[ZnWriteVINSERT128Ld], (instregex "VINSERTF128rm")>;
+
+// VMASKMOVP S/D.
+// x,x,m.
+def ZnWriteVMASKMOVPLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let Latency = 8;
+}
+// y,y,m.
+def ZnWriteVMASKMOVPLdY : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def ZnWriteVMASKMOVPm : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let Latency = 4;
+}
+def : InstRW<[ZnWriteVMASKMOVPLd], (instregex "VMASKMOVP(S|D)rm")>;
+def : InstRW<[ZnWriteVMASKMOVPLdY], (instregex "VMASKMOVP(S|D)Yrm")>;
+def : InstRW<[ZnWriteVMASKMOVPm], (instregex "VMASKMOVP(S|D)mr")>;
+
+// m256,y,y.
+def ZnWriteVMASKMOVPYmr : SchedWriteRes<[ZnAGU,ZnFPU01]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteVMASKMOVPYmr], (instregex "VMASKMOVP(S|D)Ymr")>;
+
+// VGATHERDPS.
+// x.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERDPSrm")>;
+// y.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERDPSYrm")>;
+
+// VGATHERQPS.
+// x.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERQPSrm")>;
+
+// y.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERQPSYrm")>;
+
+// VGATHERDPD.
+// x.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERDPDrm")>;
+
+// y.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERDPDYrm")>;
+
+// VGATHERQPD.
+// x.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERQPDrm")>;
+
+// y.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERQPDYrm")>;
+
+//-- Conversion instructions --//
+def ZnWriteCVTPD2PSr: SchedWriteRes<[ZnFPU3]> {
+ let Latency = 4;
+}
+// CVTPD2PS.
+// x,x.
+def : InstRW<[ZnWriteCVTPD2PSr], (instregex "(V?)CVTPD2PSrr")>;
+
+def ZnWriteCVTPD2PSLd: SchedWriteRes<[ZnAGU,ZnFPU03]> {
+ let Latency = 11;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,2];
+}
+// x,m128.
+def : InstRW<[ZnWriteCVTPD2PSLd], (instregex "(V?)CVTPD2PS(X?)rm")>;
+
+// x,y.
+def ZnWriteCVTPD2PSYr : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 5;
+}
+def : InstRW<[ZnWriteCVTPD2PSYr], (instregex "(V?)CVTPD2PSYrr")>;
+
+// x,m256.
+def ZnWriteCVTPD2PSYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 11;
+}
+def : InstRW<[ZnWriteCVTPD2PSYLd], (instregex "(V?)CVTPD2PSYrm")>;
+
+// CVTSD2SS.
+// x,x.
+// Same as WriteCVTPD2PSr
+def : InstRW<[ZnWriteCVTPD2PSr], (instregex "(Int_)?(V)?CVTSD2SSrr")>;
+
+// x,m64.
+def : InstRW<[ZnWriteCVTPD2PSLd], (instregex "(Int_)?(V)?CVTSD2SSrm")>;
+
+// CVTPS2PD.
+// x,x.
+def ZnWriteCVTPS2PDr : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 3;
+}
+def : InstRW<[ZnWriteCVTPS2PDr], (instregex "(V?)CVTPS2PDrr")>;
+
+// x,m64.
+// y,m128.
+def ZnWriteCVTPS2PDLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 10;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteCVTPS2PDLd], (instregex "(V?)CVTPS2PD(Y?)rm")>;
+
+// y,x.
+def ZnWriteVCVTPS2PDY : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 3;
+}
+def : InstRW<[ZnWriteVCVTPS2PDY], (instregex "VCVTPS2PDYrr")>;
+
+// CVTSS2SD.
+// x,x.
+def ZnWriteCVTSS2SDr : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 4;
+}
+def : InstRW<[ZnWriteCVTSS2SDr], (instregex "(Int_)?(V?)CVTSS2SDrr")>;
+
+// x,m32.
+def ZnWriteCVTSS2SDLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 11;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteCVTSS2SDLd], (instregex "(Int_)?(V?)CVTSS2SDrm")>;
+
+def ZnWriteCVTDQ2PDr: SchedWriteRes<[ZnFPU12,ZnFPU3]> {
+ let Latency = 5;
+}
+// CVTDQ2PD.
+// x,x.
+def : InstRW<[ZnWriteCVTDQ2PDr], (instregex "(V)?CVTDQ2PDrr")>;
+
+// Same as xmm
+// y,x.
+def : InstRW<[ZnWriteCVTDQ2PDr], (instregex "VCVTDQ2PDYrr")>;
+
+def ZnWriteCVTPD2DQr: SchedWriteRes<[ZnFPU12, ZnFPU3]> {
+ let Latency = 5;
+}
+// CVT(T)PD2DQ.
+// x,x.
+def : InstRW<[ZnWriteCVTPD2DQr], (instregex "(V?)CVT(T?)PD2DQrr")>;
+
+def ZnWriteCVTPD2DQLd: SchedWriteRes<[ZnAGU,ZnFPU12,ZnFPU3]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
+}
+// x,m128.
+def : InstRW<[ZnWriteCVTPD2DQLd], (instregex "(V?)CVT(T?)PD2DQrm")>;
+// same as xmm handling
+// x,y.
+def : InstRW<[ZnWriteCVTPD2DQr], (instregex "VCVT(T?)PD2DQYrr")>;
+// x,m256.
+def : InstRW<[ZnWriteCVTPD2DQLd], (instregex "VCVT(T?)PD2DQYrm")>;
+def : InstRW<[ZnWriteCVTPD2DQLd], (instregex "VCVT(T?)PD2DQ(64)?rm")>;
+
+def ZnWriteCVTPS2PIr: SchedWriteRes<[ZnFPU3]> {
+ let Latency = 4;
+}
+// CVT(T)PS2PI.
+// mm,x.
+def : InstRW<[ZnWriteCVTPS2PIr], (instregex "MMX_CVT(T?)PS2PIirr")>;
+
+// CVTPI2PD.
+// x,mm.
+def : InstRW<[ZnWriteCVTPS2PDr], (instregex "MMX_CVT(T?)PI2PDirr")>;
+
+// CVT(T)PD2PI.
+// mm,x.
+def : InstRW<[ZnWriteCVTPS2PIr], (instregex "MMX_CVT(T?)PD2PIirr")>;
+
+def ZnWriteCVSTSI2SSr: SchedWriteRes<[ZnFPU3]> {
+ let Latency = 5;
+}
+// CVTSI2SS.
+// x,r32.
+def : InstRW<[ZnWriteCVSTSI2SSr], (instregex "(Int_)?(V?)CVT(T?)SI2SS(64)?rr")>;
+
+// same as CVTPD2DQr
+// CVT(T)SS2SI.
+// r32,x.
+def : InstRW<[ZnWriteCVTPD2DQr], (instregex "(Int_)?(V?)CVT(T?)SS2SI(64)?rr")>;
+// same as CVTPD2DQm
+// r32,m32.
+def : InstRW<[ZnWriteCVTPD2DQLd], (instregex "(Int_)?(V?)CVT(T?)SS2SI(64)?rm")>;
+
+def ZnWriteCVSTSI2SDr: SchedWriteRes<[ZnFPU013, ZnFPU3]> {
+ let Latency = 5;
+}
+// CVTSI2SD.
+// x,r32/64.
+def : InstRW<[ZnWriteCVSTSI2SDr], (instregex "(Int_)?(V?)CVTSI2SD(64)?rr")>;
+
+def ZnWriteCVSTSI2SIr: SchedWriteRes<[ZnFPU3, ZnFPU2]> {
+ let Latency = 5;
+}
+def ZnWriteCVSTSI2SILd: SchedWriteRes<[ZnAGU, ZnFPU3, ZnFPU2]> {
+ let Latency = 12;
+}
+// CVTSD2SI.
+// r32/64
+def : InstRW<[ZnWriteCVSTSI2SIr], (instregex "(Int_)?CVT(T?)SD2SI(64)?rr")>;
+// r32,m32.
+def : InstRW<[ZnWriteCVSTSI2SILd], (instregex "(Int_)?CVT(T?)SD2SI(64)?rm")>;
+
+def ZnWriteVCVSTSI2SIr: SchedWriteRes<[ZnFPU3]> {
+ let Latency = 5;
+}
+def ZnWriteVCVSTSI2SILd: SchedWriteRes<[ZnFPU3, ZnAGU]> {
+ let Latency = 12;
+}
+// VCVTSD2SI.
+// r32/64
+def : InstRW<[ZnWriteCVSTSI2SIr], (instregex "(Int_)?VCVT(T?)SD2SI(64)?rr")>;
+// r32,m32.
+def : InstRW<[ZnWriteVCVSTSI2SILd], (instregex "(Int_)?VCVT(T?)SD2SI(64)?rm")>;
+
+// VCVTPS2PH.
+// x,v,i.
+def : InstRW<[WriteMicrocoded], (instregex "VCVTPS2PH(Y?)rr")>;
+// m,v,i.
+def : InstRW<[WriteMicrocoded], (instregex "VCVTPS2PH(Y?)mr")>;
+
+// VCVTPH2PS.
+// v,x.
+def : InstRW<[WriteMicrocoded], (instregex "VCVTPH2PS(Y?)rr")>;
+// v,m.
+def : InstRW<[WriteMicrocoded], (instregex "VCVTPH2PS(Y?)rm")>;
+
+//-- SSE4A instructions --//
+// EXTRQ
+def ZnWriteEXTRQ: SchedWriteRes<[ZnFPU12, ZnFPU2]> {
+ let Latency = 2;
+}
+def : InstRW<[ZnWriteEXTRQ], (instregex "EXTRQ")>;
+
+// INSERTQ
+def ZnWriteINSERTQ: SchedWriteRes<[ZnFPU03,ZnFPU1]> {
+ let Latency = 4;
+}
+def : InstRW<[ZnWriteINSERTQ], (instregex "INSERTQ")>;
+
+// MOVNTSS/MOVNTSD
+def ZnWriteMOVNT: SchedWriteRes<[ZnAGU,ZnFPU2]> {
+ let Latency = 8;
+}
+def : InstRW<[ZnWriteMOVNT], (instregex "MOVNTS(S|D)")>;
+
+//-- SHA instructions --//
+// SHA256MSG2
+def : InstRW<[WriteMicrocoded], (instregex "SHA256MSG2(Y?)r(r|m)")>;
+
+// SHA1MSG1, SHA256MSG1
+// x,x.
+def ZnWriteSHA1MSG1r : SchedWriteRes<[ZnFPU12]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
+def : InstRW<[ZnWriteSHA1MSG1r], (instregex "SHA(1|256)MSG1rr")>;
+// x,m.
+def ZnWriteSHA1MSG1Ld : SchedWriteRes<[ZnAGU, ZnFPU12]> {
+ let Latency = 9;
+ let ResourceCycles = [1,2];
+}
+def : InstRW<[ZnWriteSHA1MSG1Ld], (instregex "SHA(1|256)MSG1rm")>;
+
+// SHA1MSG2
+// x,x.
+def ZnWriteSHA1MSG2r : SchedWriteRes<[ZnFPU12]> ;
+def : InstRW<[ZnWriteSHA1MSG2r], (instregex "SHA1MSG2rr")>;
+// x,m.
+def ZnWriteSHA1MSG2Ld : SchedWriteRes<[ZnAGU, ZnFPU12]> {
+ let Latency = 8;
+}
+def : InstRW<[ZnWriteSHA1MSG2Ld], (instregex "SHA1MSG2rm")>;
+
+// SHA1NEXTE
+// x,x.
+def ZnWriteSHA1NEXTEr : SchedWriteRes<[ZnFPU1]> ;
+def : InstRW<[ZnWriteSHA1NEXTEr], (instregex "SHA1NEXTErr")>;
+// x,m.
+def ZnWriteSHA1NEXTELd : SchedWriteRes<[ZnAGU, ZnFPU1]> {
+ let Latency = 8;
+}
+def : InstRW<[ZnWriteSHA1NEXTELd], (instregex "SHA1NEXTErm")>;
+
+// SHA1RNDS4
+// x,x.
+def ZnWriteSHA1RNDS4r : SchedWriteRes<[ZnFPU1]> {
+ let Latency = 6;
+}
+def : InstRW<[ZnWriteSHA1RNDS4r], (instregex "SHA1RNDS4rr")>;
+// x,m.
+def ZnWriteSHA1RNDS4Ld : SchedWriteRes<[ZnAGU, ZnFPU1]> {
+ let Latency = 13;
+}
+def : InstRW<[ZnWriteSHA1RNDS4Ld], (instregex "SHA1RNDS4rm")>;
+
+// SHA256RNDS2
+// x,x.
+def ZnWriteSHA256RNDS2r : SchedWriteRes<[ZnFPU1]> {
+ let Latency = 4;
+}
+def : InstRW<[ZnWriteSHA256RNDS2r], (instregex "SHA256RNDS2rr")>;
+// x,m.
+def ZnWriteSHA256RNDS2Ld : SchedWriteRes<[ZnAGU, ZnFPU1]> {
+ let Latency = 11;
+}
+def : InstRW<[ZnWriteSHA256RNDS2Ld], (instregex "SHA256RNDS2rm")>;
+
+//-- Arithmetic instructions --//
+
+// HADD, HSUB PS/PD
+def : InstRW<[WriteMicrocoded], (instregex "(V?)H(ADD|SUB)P(S|D)(Y?)r(r|m)")>;
+
+// MUL SS/SD PS/PD.
+// x,x / v,v,v.
+def ZnWriteMULr : SchedWriteRes<[ZnFPU01]> {
+ let Latency = 3;
+}
+// ymm.
+def ZnWriteMULYr : SchedWriteRes<[ZnFPU01]> {
+ let Latency = 4;
+}
+def : InstRW<[ZnWriteMULr], (instregex "(V?)MUL(P|S)(S|D)rr")>;
+def : InstRW<[ZnWriteMULYr], (instregex "(V?)MUL(P|S)(S|D)Yrr")>;
+
+// x,m / v,v,m.
+def ZnWriteMULLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let Latency = 10;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMULLd], (instregex "(V?)MUL(P|S)(S|D)rm")>;
+
+// ymm
+def ZnWriteMULYLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let Latency = 11;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMULYLd], (instregex "(V?)MUL(P|S)(S|D)Yrm")>;
+
+// VDIVPS.
+// y,y,y.
+def ZnWriteVDIVPSYr : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 12;
+ let ResourceCycles = [12];
+}
+def : InstRW<[ZnWriteVDIVPSYr], (instregex "VDIVPSYrr")>;
+
+// y,y,m256.
+def ZnWriteVDIVPSYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 19;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1, 19];
+}
+def : InstRW<[ZnWriteVDIVPSYLd], (instregex "VDIVPSYrm")>;
+
+// VDIVPD.
+// y,y,y.
+def ZnWriteVDIVPDY : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 15;
+ let ResourceCycles = [15];
+}
+def : InstRW<[ZnWriteVDIVPDY], (instregex "VDIVPDYrr")>;
+
+// y,y,m256.
+def ZnWriteVDIVPDYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 22;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,22];
+}
+def : InstRW<[ZnWriteVDIVPDYLd], (instregex "VDIVPDYrm")>;
+
+// VRCPPS.
+// y,y.
+def ZnWriteVRCPPSr : SchedWriteRes<[ZnFPU01]> {
+ let Latency = 5;
+}
+def : InstRW<[ZnWriteVRCPPSr], (instregex "VRCPPSYr(_Int)?")>;
+
+// y,m256.
+def ZnWriteVRCPPSLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let Latency = 12;
+ let NumMicroOps = 3;
+}
+def : InstRW<[ZnWriteVRCPPSLd], (instregex "VRCPPSYm(_Int)?")>;
+
+// ROUND SS/SD PS/PD.
+// v,v,i.
+def ZnWriteROUNDr : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 4;
+}
+def : InstRW<[ZnWriteROUNDr], (instregex "(V?)ROUND(Y?)(S|P)(S|D)r(_Int)?")>;
+
+// VFMADD.
+// v,v,v.
+def ZnWriteFMADDr : SchedWriteRes<[ZnFPU03]> {
+ let Latency = 5;
+}
+def : InstRW<[ZnWriteFMADDr],
+ (instregex
+ "VF(N?)M(ADD|SUB|ADDSUB|SUBADD)P(S|D)(r213|r132|r231)r(Y)?",
+ "VF(N?)M(ADD|SUB)S(S|D)(r132|r231|r213)r",
+ "VF(N?)M(ADD|SUB)S(S|D)4rr(_REV|_Int)?",
+ "VF(N?)M(ADD|SUB)P(S|D)4rr(Y)?(_REV)?")>;
+
+// v,v,m.
+def ZnWriteFMADDm : SchedWriteRes<[ZnAGU, ZnFPU03]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteFMADDm],
+ (instregex
+ "VF(N?)M(ADD|SUB|ADDSUB|SUBADD)P(S|D)(r213|r132|r231)m(Y)?",
+ "VF(N?)M(ADD|SUB)S(S|D)(r132|r231|r213)m",
+ "VF(N?)M(ADD|SUB)S(S|D)4(rm|mr)(_Int)?",
+ "VF(N?)M(ADD|SUB)P(S|D)4(rm|mr)(Y)?")>;
+
+// v,m,i.
+def ZnWriteROUNDm : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 11;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteROUNDm], (instregex "(V?)ROUND(Y?)(S|P)(S|D)m(_Int)?")>;
+
+// DPPS.
+// x,x,i / v,v,v,i.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)DPPS(Y?)rri")>;
+
+// x,m,i / v,v,m,i.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)DPPS(Y?)rmi")>;
+
+// DPPD.
+// x,x,i.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)DPPDrri")>;
+
+// x,m,i.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)DPPDrmi")>;
+
+// VSQRTPS.
+// y,y.
+def ZnWriteVSQRTPSYr : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 28;
+ let ResourceCycles = [28];
+}
+def : InstRW<[ZnWriteVSQRTPSYr], (instregex "VSQRTPSYr")>;
+
+// y,m256.
+def ZnWriteVSQRTPSYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 35;
+ let ResourceCycles = [1,35];
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteVSQRTPSYLd], (instregex "VSQRTPSYm")>;
+
+// VSQRTPD.
+// y,y.
+def ZnWriteVSQRTPDYr : SchedWriteRes<[ZnFPU3]> {
+ let Latency = 40;
+ let ResourceCycles = [40];
+}
+def : InstRW<[ZnWriteVSQRTPDYr], (instregex "VSQRTPDYr")>;
+
+// y,m256.
+def ZnWriteVSQRTPDYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+ let Latency = 47;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,47];
+}
+def : InstRW<[ZnWriteVSQRTPDYLd], (instregex "VSQRTPDYm")>;
+
+// RSQRTSS
+// x,x.
+def ZnWriteRSQRTSSr : SchedWriteRes<[ZnFPU02]> {
+ let Latency = 5;
+}
+def : InstRW<[ZnWriteRSQRTSSr], (instregex "(V?)RSQRTSS(Y?)r(_Int)?")>;
+
+// RSQRTPS
+// x,x.
+def ZnWriteRSQRTPSr : SchedWriteRes<[ZnFPU01]> {
+ let Latency = 5;
+}
+def : InstRW<[ZnWriteRSQRTPSr], (instregex "(V?)RSQRTPS(Y?)r(_Int)?")>;
+
+// RSQRTSSm
+// x,m128.
+def ZnWriteRSQRTSSLd: SchedWriteRes<[ZnAGU, ZnFPU02]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
+ let ResourceCycles = [1,2];
+}
+def : InstRW<[ZnWriteRSQRTSSLd], (instregex "(V?)RSQRTSSm(_Int)?")>;
+
+// RSQRTPSm
+def ZnWriteRSQRTPSLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteRSQRTPSLd], (instregex "(V?)RSQRTPSm(_Int)?")>;
+
+// RSQRTPS 256.
+// y,y.
+def ZnWriteRSQRTPSYr : SchedWriteRes<[ZnFPU01]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+ let ResourceCycles = [2];
+}
+def : InstRW<[ZnWriteRSQRTPSYr], (instregex "VRSQRTPSYr(_Int)?")>;
+
+// y,m256.
+def ZnWriteRSQRTPSYLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+ let Latency = 12;
+ let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteRSQRTPSYLd], (instregex "VRSQRTPSYm(_Int)?")>;
+
+//-- Logic instructions --//
+
+// AND, ANDN, OR, XOR PS/PD.
+// x,x / v,v,v.
+def : InstRW<[WriteVecLogic], (instregex "(V?)(AND|ANDN|OR|XOR)P(S|D)(Y?)rr")>;
+// x,m / v,v,m.
+def : InstRW<[WriteVecLogicLd],
+ (instregex "(V?)(AND|ANDN|OR|XOR)P(S|D)(Y?)rm")>;
+
+//-- Other instructions --//
+
+// VZEROUPPER.
+def : InstRW<[WriteMicrocoded], (instregex "VZEROUPPER")>;
+
+// VZEROALL.
+def : InstRW<[WriteMicrocoded], (instregex "VZEROALL")>;
+
+// LDMXCSR.
+def : InstRW<[WriteMicrocoded], (instregex "(V)?LDMXCSR")>;
+
+// STMXCSR.
+def : InstRW<[WriteMicrocoded], (instregex "(V)?STMXCSR")>;
+
+} // SchedModel
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vaesdec (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a2, align 16
%2 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1)
%3 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %2, <2 x i64> %1)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a2, align 16
%2 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1)
%3 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %2, <2 x i64> %1)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vaesenc (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a2, align 16
%2 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1)
%3 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %2, <2 x i64> %1)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vaesenclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a2, align 16
%2 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1)
%3 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %2, <2 x i64> %1)
; ZNVER1-NEXT: vaesimc (%rdi), %xmm1 # sched: [11:0.50]
; ZNVER1-NEXT: vaesimc %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a1, align 16
%2 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0)
%3 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %1)
; ZNVER1-NEXT: vaeskeygenassist $7, (%rdi), %xmm1 # sched: [11:0.50]
; ZNVER1-NEXT: vaeskeygenassist $7, %xmm0, %xmm0 # sched: [4:0.50]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a1, align 16
%2 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7)
%3 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %1, i8 7)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fadd <4 x double> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fadd <8 x float> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %1, <4 x double> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %1, <8 x float> %2)
; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
%3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
%3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
%3 = and <4 x i64> %1, %2
; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
%3 = and <4 x i64> %1, %2
; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fadd <4 x double> %a1, %1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 3, i32 12, i32 13, i32 14, i32 7>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
%2 = load <4 x double>, <4 x double> *%a3, align 32
%3 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %1, <4 x double> %2, <4 x double> %a2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
%2 = load <8 x float>, <8 x float> *%a3, align 32
%3 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %1, <8 x float> %2, <8 x float> %a2)
; ZNVER1-LABEL: test_broadcastf128:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 32
%2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
ret <8 x float> %2
; ZNVER1-LABEL: test_broadcastsd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load double, double *%a0, align 8
%2 = insertelement <4 x double> undef, double %1, i32 0
%3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> zeroinitializer
; ZNVER1-LABEL: test_broadcastss:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load float, float *%a0, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
; ZNVER1-LABEL: test_broadcastss_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load float, float *%a0, align 4
%2 = insertelement <8 x float> undef, float %1, i32 0
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> zeroinitializer
; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fcmp oeq <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fcmp oeq <4 x double> %a0, %2
; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fcmp oeq <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fcmp oeq <8 x float> %a0, %2
; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sitofp <4 x i32> %a0 to <4 x double>
%2 = load <4 x i32>, <4 x i32> *%a1, align 16
%3 = sitofp <4 x i32> %2 to <4 x double>
; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sitofp <8 x i32> %a0 to <8 x float>
%2 = load <8 x i32>, <8 x i32> *%a1, align 16
%3 = sitofp <8 x i32> %2 to <8 x float>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi <4 x double> %a0 to <4 x i32>
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = fptosi <4 x double> %2 to <4 x i32>
;
; ZNVER1-LABEL: test_cvtpd2ps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptrunc <4 x double> %a0 to <4 x float>
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = fptrunc <4 x double> %2 to <4 x float>
; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [5:1.00]
; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi <8 x float> %a0 to <8 x i32>
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = fptosi <8 x float> %2 to <8 x i32>
;
; ZNVER1-LABEL: test_divpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:1.00]
-; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:15.00]
+; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:22.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fdiv <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fdiv <4 x double> %1, %2
;
; ZNVER1-LABEL: test_divps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [15:1.00]
-; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [22:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [12:12.00]
+; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [19:19.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fdiv <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fdiv <8 x float> %1, %2
;
; ZNVER1-LABEL: test_dpps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %1, <8 x float> %2, i8 7)
;
; ZNVER1-LABEL: test_extractf128:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.33]
+; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [8:0.50]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
store <4 x float> %2, <4 x float> *%a2
;
; ZNVER1-LABEL: test_haddpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %1, <4 x double> %2)
;
; ZNVER1-LABEL: test_haddps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %1, <8 x float> %2)
;
; ZNVER1-LABEL: test_hsubpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %1, <4 x double> %2)
;
; ZNVER1-LABEL: test_hsubps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %1, <8 x float> %2)
;
; ZNVER1-LABEL: test_insertf128:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
-; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.67]
+; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
%3 = load <4 x float>, <4 x float> *%a2, align 16
; ZNVER1-LABEL: test_lddqu:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vlddqu (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0)
ret <32 x i8> %1
}
;
; ZNVER1-LABEL: test_maskmovpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2
-; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi)
+; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
+; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1)
call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %a1, <2 x double> %a2)
ret <2 x double> %1
;
; ZNVER1-LABEL: test_maskmovpd_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2
-; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi)
+; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
+; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1)
call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %a1, <4 x double> %a2)
ret <4 x double> %1
;
; ZNVER1-LABEL: test_maskmovps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2
-; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
+; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
+; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1)
call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %a1, <4 x float> %a2)
ret <4 x float> %1
;
; ZNVER1-LABEL: test_maskmovps_ymm:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2
-; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi)
+; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
+; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1)
call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %a1, <8 x float> %a2)
ret <8 x float> %1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %1, <4 x double> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %1, <8 x float> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %1, <4 x double> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %1, <8 x float> %2)
; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x double>, <4 x double> *%a0, align 32
%2 = fadd <4 x double> %1, %1
store <4 x double> %2, <4 x double> *%a1, align 32
; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <8 x float>, <8 x float> *%a0, align 32
%2 = fadd <8 x float> %1, %1
store <8 x float> %2, <8 x float> *%a1, align 32
; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [8:0.50]
; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
;
; ZNVER1-LABEL: test_movmskpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
ret i32 %1
}
;
; ZNVER1-LABEL: test_movmskps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
ret i32 %1
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd <4 x double> %a0, %a0
store <4 x double> %1, <4 x double> *%a1, align 32, !nontemporal !0
ret <4 x double> %1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd <8 x float> %a0, %a0
store <8 x float> %1, <8 x float> *%a1, align 32, !nontemporal !0
ret <8 x float> %1
; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [8:0.50]
; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [8:0.50]
; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x double>, <4 x double> *%a0, align 1
%2 = fadd <4 x double> %1, %1
store <4 x double> %2, <4 x double> *%a1, align 1
; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <8 x float>, <8 x float> *%a0, align 1
%2 = fadd <8 x float> %1, %1
store <8 x float> %2, <8 x float> *%a1, align 1
;
; ZNVER1-LABEL: test_mulpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
+; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fmul <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fmul <4 x double> %1, %2
;
; ZNVER1-LABEL: test_mulps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
+; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fmul <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fmul <8 x float> %1, %2
; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
%3 = or <4 x i64> %1, %2
; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
%3 = or <4 x i64> %1, %2
; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [8:0.50]
; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 1, i32 0>
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> <i32 1, i32 0>
; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:0.50]
; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
; ZNVER1-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
; ZNVER1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:0.50]
; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> %2)
; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %2)
;
; ZNVER1-LABEL: test_roundpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00]
-; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [11:1.00]
+; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7)
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %2, i32 7)
;
; ZNVER1-LABEL: test_roundps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [10:1.00]
-; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [11:1.00]
+; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7)
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %2, i32 7)
; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %2)
; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 0, i32 8, i32 8, i32 4, i32 4, i32 12, i32 12>
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 3, i32 8, i32 8, i32 4, i32 7, i32 12, i32 12>
;
; ZNVER1-LABEL: test_sqrtpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [27:1.00]
-; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [47:47.00]
+; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [40:40.00]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
%2 = load <4 x double>, <4 x double> *%a1, align 32
%3 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %2)
;
; ZNVER1-LABEL: test_sqrtps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [27:1.00]
-; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [35:35.00]
+; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [28:28.00]
; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
%2 = load <8 x float>, <8 x float> *%a1, align 32
%3 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fsub <4 x double> %a0, %a1
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = fsub <4 x double> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fsub <8 x float> %a0, %a1
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = fsub <8 x float> %1, %2
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %2)
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1)
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %2)
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %2)
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1)
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %2)
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
%2 = load <4 x double>, <4 x double> *%a2, align 32
%3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
%2 = load <8 x float>, <8 x float> *%a2, align 32
%3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x double> %a0 to <4 x i64>
%2 = bitcast <4 x double> %a1 to <4 x i64>
%3 = xor <4 x i64> %1, %2
; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <8 x float> %a0 to <4 x i64>
%2 = bitcast <8 x float> %a1 to <4 x i64>
%3 = xor <4 x i64> %1, %2
;
; ZNVER1-LABEL: test_zeroall:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vzeroall
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vzeroall # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.avx.vzeroall()
ret void
}
;
; ZNVER1-LABEL: test_zeroupper:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.avx.vzeroupper()
ret void
}
; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
%2 = load <32 x i8>, <32 x i8> *%a1, align 32
%3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2)
; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
%2 = load <8 x i32>, <8 x i32> *%a1, align 32
%3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2)
; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
%2 = load <16 x i16>, <16 x i16> *%a1, align 32
%3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <32 x i8> %a0, %a1
%2 = load <32 x i8>, <32 x i8> *%a2, align 32
%3 = add <32 x i8> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = add <8 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = add <4 x i64> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = add <16 x i16> %1, %2
; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = and <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = and <4 x i64> %1, %2
; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1>
%2 = and <4 x i64> %a1, %1
%3 = load <4 x i64>, <4 x i64> *%a2, align 32
;
; ZNVER1-LABEL: test_pmulld:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
-; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
+; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = mul <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = mul <8 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = mul <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = mul <16 x i16> %1, %2
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = or <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = or <4 x i64> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <32 x i8> %a0, %a1
%2 = load <32 x i8>, <32 x i8> *%a2, align 32
%3 = sub <32 x i8> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <8 x i32> %a0, %a1
%2 = load <8 x i32>, <8 x i32> *%a2, align 32
%3 = sub <8 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = sub <4 x i64> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <16 x i16> %a0, %a1
%2 = load <16 x i16>, <16 x i16> *%a2, align 32
%3 = sub <16 x i16> %1, %2
; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = xor <4 x i64> %a0, %a1
%2 = load <4 x i64>, <4 x i64> *%a2, align 32
%3 = xor <4 x i64> %1, %2
; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
; ZNVER1-NEXT: addl %edi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a2
%2 = xor i16 %a0, -1
%3 = and i16 %2, %a1
; ZNVER1-NEXT: andnl (%rdx), %edi, %eax # sched: [5:0.50]
; ZNVER1-NEXT: andnl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = xor i32 %a0, -1
%3 = and i32 %2, %a1
; ZNVER1-NEXT: andnq (%rdx), %rdi, %rax # sched: [5:0.50]
; ZNVER1-NEXT: andnq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = xor i64 %a0, -1
%3 = and i64 %2, %a1
;
; ZNVER1-LABEL: test_bextr_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: bextrl %edi, (%rdx), %ecx
-; ZNVER1-NEXT: bextrl %edi, %esi, %eax
+; ZNVER1-NEXT: bextrl %edi, (%rdx), %ecx # sched: [5:0.50]
+; ZNVER1-NEXT: bextrl %edi, %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %1, i32 %a0)
%3 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a1, i32 %a0)
;
; ZNVER1-LABEL: test_bextr_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: bextrq %rdi, (%rdx), %rcx
-; ZNVER1-NEXT: bextrq %rdi, %rsi, %rax
+; ZNVER1-NEXT: bextrq %rdi, (%rdx), %rcx # sched: [5:0.50]
+; ZNVER1-NEXT: bextrq %rdi, %rsi, %rax # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %1, i64 %a0)
%3 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a1, i64 %a0)
;
; ZNVER1-LABEL: test_blsi_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: blsil (%rsi), %ecx
-; ZNVER1-NEXT: blsil %edi, %eax
+; ZNVER1-NEXT: blsil (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT: blsil %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = sub i32 0, %1
%3 = sub i32 0, %a0
;
; ZNVER1-LABEL: test_blsi_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: blsiq (%rsi), %rcx
-; ZNVER1-NEXT: blsiq %rdi, %rax
+; ZNVER1-NEXT: blsiq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT: blsiq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = sub i64 0, %1
%3 = sub i64 0, %a0
;
; ZNVER1-LABEL: test_blsmsk_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: blsmskl (%rsi), %ecx
-; ZNVER1-NEXT: blsmskl %edi, %eax
+; ZNVER1-NEXT: blsmskl (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT: blsmskl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = sub i32 %1, 1
%3 = sub i32 %a0, 1
;
; ZNVER1-LABEL: test_blsmsk_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: blsmskq (%rsi), %rcx
-; ZNVER1-NEXT: blsmskq %rdi, %rax
+; ZNVER1-NEXT: blsmskq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT: blsmskq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = sub i64 %1, 1
%3 = sub i64 %a0, 1
;
; ZNVER1-LABEL: test_blsr_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: blsrl (%rsi), %ecx
-; ZNVER1-NEXT: blsrl %edi, %eax
+; ZNVER1-NEXT: blsrl (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT: blsrl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = sub i32 %1, 1
%3 = sub i32 %a0, 1
;
; ZNVER1-LABEL: test_blsr_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: blsrq (%rsi), %rcx
-; ZNVER1-NEXT: blsrq %rdi, %rax
+; ZNVER1-NEXT: blsrq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT: blsrq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = sub i64 %1, 1
%3 = sub i64 %a0, 1
;
; ZNVER1-LABEL: test_cttz_i16:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: tzcntw (%rsi), %cx
-; ZNVER1-NEXT: tzcntw %di, %ax
+; ZNVER1-NEXT: tzcntw (%rsi), %cx # sched: [6:0.50]
+; ZNVER1-NEXT: tzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
%3 = tail call i16 @llvm.cttz.i16( i16 %a0, i1 false )
;
; ZNVER1-LABEL: test_cttz_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: tzcntl (%rsi), %ecx
-; ZNVER1-NEXT: tzcntl %edi, %eax
+; ZNVER1-NEXT: tzcntl (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT: tzcntl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = tail call i32 @llvm.cttz.i32( i32 %1, i1 false )
%3 = tail call i32 @llvm.cttz.i32( i32 %a0, i1 false )
;
; ZNVER1-LABEL: test_cttz_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: tzcntq (%rsi), %rcx
-; ZNVER1-NEXT: tzcntq %rdi, %rax
+; ZNVER1-NEXT: tzcntq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT: tzcntq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = tail call i64 @llvm.cttz.i64( i64 %1, i1 false )
%3 = tail call i64 @llvm.cttz.i64( i64 %a0, i1 false )
;
; ZNVER1-LABEL: test_bzhi_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: bzhil %edi, (%rdx), %ecx
-; ZNVER1-NEXT: bzhil %edi, %esi, %eax
+; ZNVER1-NEXT: bzhil %edi, (%rdx), %ecx # sched: [5:0.50]
+; ZNVER1-NEXT: bzhil %edi, %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %1, i32 %a0)
%3 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a1, i32 %a0)
;
; ZNVER1-LABEL: test_bzhi_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: bzhiq %rdi, (%rdx), %rcx
-; ZNVER1-NEXT: bzhiq %rdi, %rsi, %rax
+; ZNVER1-NEXT: bzhiq %rdi, (%rdx), %rcx # sched: [5:0.50]
+; ZNVER1-NEXT: bzhiq %rdi, %rsi, %rax # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %1, i64 %a0)
%3 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a1, i64 %a0)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movq %rdx, %rax # sched: [1:0.25]
; ZNVER1-NEXT: movq %rdi, %rdx # sched: [1:0.25]
-; ZNVER1-NEXT: mulxq %rsi, %rsi, %rcx # sched: [4:2.00]
-; ZNVER1-NEXT: mulxq (%rax), %rdx, %rax # sched: [8:2.00]
+; ZNVER1-NEXT: mulxq %rsi, %rsi, %rcx # sched: [3:1.00]
+; ZNVER1-NEXT: mulxq (%rax), %rdx, %rax # sched: [8:1.00]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = zext i64 %a0 to i128
%3 = zext i64 %a1 to i128
;
; ZNVER1-LABEL: test_pdep_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx
-; ZNVER1-NEXT: pdepl %esi, %edi, %eax
+; ZNVER1-NEXT: pdepl (%rdx), %edi, %ecx # sched: [100:?]
+; ZNVER1-NEXT: pdepl %esi, %edi, %eax # sched: [100:?]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %1)
%3 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
;
; ZNVER1-LABEL: test_pdep_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx
-; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax
+; ZNVER1-NEXT: pdepq (%rdx), %rdi, %rcx # sched: [100:?]
+; ZNVER1-NEXT: pdepq %rsi, %rdi, %rax # sched: [100:?]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %1)
%3 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
;
; ZNVER1-LABEL: test_pext_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx
-; ZNVER1-NEXT: pextl %esi, %edi, %eax
+; ZNVER1-NEXT: pextl (%rdx), %edi, %ecx # sched: [100:?]
+; ZNVER1-NEXT: pextl %esi, %edi, %eax # sched: [100:?]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %1)
%3 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
;
; ZNVER1-LABEL: test_pext_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx
-; ZNVER1-NEXT: pextq %rsi, %rdi, %rax
+; ZNVER1-NEXT: pextq (%rdx), %rdi, %rcx # sched: [100:?]
+; ZNVER1-NEXT: pextq %rsi, %rdi, %rax # sched: [100:?]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %1)
%3 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
; ZNVER1-NEXT: rorxl $5, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: rorxl $5, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = lshr i32 %a0, 5
%3 = shl i32 %a0, 27
; ZNVER1-NEXT: rorxq $5, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: rorxq $5, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = lshr i64 %a0, 5
%3 = shl i64 %a0, 59
; ZNVER1-NEXT: sarxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: sarxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = ashr i32 %a0, %a1
%3 = ashr i32 %1, %a1
; ZNVER1-NEXT: sarxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: sarxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = ashr i64 %a0, %a1
%3 = ashr i64 %1, %a1
; ZNVER1-NEXT: shlxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: shlxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = shl i32 %a0, %a1
%3 = shl i32 %1, %a1
; ZNVER1-NEXT: shlxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: shlxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = shl i64 %a0, %a1
%3 = shl i64 %1, %a1
; ZNVER1-NEXT: shrxl %esi, (%rdx), %eax # sched: [5:0.50]
; ZNVER1-NEXT: shrxl %esi, %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a2
%2 = lshr i32 %a0, %a1
%3 = lshr i32 %1, %a1
; ZNVER1-NEXT: shrxq %rsi, (%rdx), %rax # sched: [5:0.50]
; ZNVER1-NEXT: shrxq %rsi, %rdi, %rcx # sched: [1:0.25]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a2
%2 = lshr i64 %a0, %a1
%3 = lshr i64 %1, %a1
;
; ZNVER1-LABEL: test_vcvtph2ps_128:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [12:1.00]
-; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtph2ps (%rdi), %xmm1 # sched: [100:?]
+; ZNVER1-NEXT: vcvtph2ps %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <8 x i16>, <8 x i16> *%a1
%2 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %1)
%3 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0)
;
; ZNVER1-LABEL: test_vcvtph2ps_256:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [12:1.00]
-; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtph2ps (%rdi), %ymm1 # sched: [100:?]
+; ZNVER1-NEXT: vcvtph2ps %xmm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <8 x i16>, <8 x i16> *%a1
%2 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %1)
%3 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0)
;
; ZNVER1-LABEL: test_vcvtps2ph_128:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vcvtps2ph $0, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vcvtps2ph $0, %xmm1, (%rdi) # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
%2 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a1, i32 0)
%3 = shufflevector <8 x i16> %2, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;
; ZNVER1-LABEL: test_vcvtps2ph_256:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [12:1.00]
-; ZNVER1-NEXT: vzeroupper
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vcvtps2ph $0, %ymm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vcvtps2ph $0, %ymm1, (%rdi) # sched: [100:?]
+; ZNVER1-NEXT: vzeroupper # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
%2 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a1, i32 0)
store <8 x i16> %2, <8 x i16> *%a2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, -24
ret i32 %2
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, 1024
ret i32 %2
}
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add nsw i32 %1, %0
ret i32 %3
}
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, 16
%4 = add i32 %3, %1
ret i32 %4
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, -4096
%4 = add i32 %3, %1
ret i32 %4
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
ret i32 %2
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
%3 = add nsw i32 %2, -32
ret i32 %3
; ZNVER1: # BB#0:
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 9
%3 = add nsw i32 %2, 10000
ret i32 %3
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 1
%4 = add nsw i32 %3, %0
ret i32 %4
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 2
%4 = add i32 %0, 96
%5 = add i32 %4, %3
; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 3
%4 = add i32 %0, -1200
%5 = add i32 %4, %3
; ZNVER1-LABEL: test_lea_offset:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq -24(%rdi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i64 %0, -24
ret i64 %2
}
; ZNVER1-LABEL: test_lea_offset_big:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq 1024(%rdi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i64 %0, 1024
ret i64 %2
}
; ZNVER1-LABEL: test_lea_add:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq (%rdi,%rsi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add nsw i64 %1, %0
ret i64 %3
}
; ZNVER1-LABEL: test_lea_add_offset:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq 16(%rdi,%rsi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i64 %0, 16
%4 = add i64 %3, %1
ret i64 %4
; ZNVER1-LABEL: test_lea_add_offset_big:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq -4096(%rdi,%rsi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i64 %0, -4096
%4 = add i64 %3, %1
ret i64 %4
; ZNVER1-LABEL: test_lea_mul:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq (%rdi,%rdi,2), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 3
ret i64 %2
}
; ZNVER1-LABEL: test_lea_mul_offset:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 3
%3 = add nsw i64 %2, -32
ret i64 %3
; ZNVER1-LABEL: test_lea_mul_offset_big:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i64 %0, 9
%3 = add nsw i64 %2, 10000
ret i64 %3
; ZNVER1-LABEL: test_lea_add_scale:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq (%rdi,%rsi,2), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 1
%4 = add nsw i64 %3, %0
ret i64 %4
; ZNVER1-LABEL: test_lea_add_scale_offset:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 2
%4 = add i64 %0, 96
%5 = add i64 %4, %3
; ZNVER1-LABEL: test_lea_add_scale_offset_big:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i64 %1, 3
%4 = add i64 %0, -1200
%5 = add i64 %4, %3
;
; ZNVER1-LABEL: test_ctlz_i16:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: lzcntw (%rsi), %cx
-; ZNVER1-NEXT: lzcntw %di, %ax
+; ZNVER1-NEXT: lzcntw (%rsi), %cx # sched: [6:0.50]
+; ZNVER1-NEXT: lzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false )
%3 = tail call i16 @llvm.ctlz.i16( i16 %a0, i1 false )
;
; ZNVER1-LABEL: test_ctlz_i32:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: lzcntl (%rsi), %ecx
-; ZNVER1-NEXT: lzcntl %edi, %eax
+; ZNVER1-NEXT: lzcntl (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT: lzcntl %edi, %eax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = tail call i32 @llvm.ctlz.i32( i32 %1, i1 false )
%3 = tail call i32 @llvm.ctlz.i32( i32 %a0, i1 false )
;
; ZNVER1-LABEL: test_ctlz_i64:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: lzcntq (%rsi), %rcx
-; ZNVER1-NEXT: lzcntq %rdi, %rax
+; ZNVER1-NEXT: lzcntq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT: lzcntq %rdi, %rax # sched: [2:0.25]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = tail call i64 @llvm.ctlz.i64( i64 %1, i1 false )
%3 = tail call i64 @llvm.ctlz.i64( i64 %a0, i1 false )
; ZNVER1-LABEL: test_ctlz_i16:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movbew (%rdi), %ax # sched: [5:0.50]
-; ZNVER1-NEXT: movbew %si, (%rdx) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: movbew %si, (%rdx) # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a0
%2 = tail call i16 @llvm.bswap.i16( i16 %1 )
%3 = tail call i16 @llvm.bswap.i16( i16 %a1 )
; ZNVER1-LABEL: test_ctlz_i32:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movbel (%rdi), %eax # sched: [5:0.50]
-; ZNVER1-NEXT: movbel %esi, (%rdx) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: movbel %esi, (%rdx) # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a0
%2 = tail call i32 @llvm.bswap.i32( i32 %1 )
%3 = tail call i32 @llvm.bswap.i32( i32 %a1 )
; ZNVER1-LABEL: test_ctlz_i64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movbeq (%rdi), %rax # sched: [5:0.50]
-; ZNVER1-NEXT: movbeq %rsi, (%rdx) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: movbeq %rsi, (%rdx) # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a0
%2 = tail call i64 @llvm.bswap.i64( i64 %1 )
%3 = tail call i64 @llvm.bswap.i64( i64 %a1 )
; ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctpop.i16( i16 %1 )
%3 = tail call i16 @llvm.ctpop.i16( i16 %a0 )
; ZNVER1-NEXT: popcntl (%rsi), %ecx # sched: [10:1.00]
; ZNVER1-NEXT: popcntl %edi, %eax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i32, i32 *%a1
%2 = tail call i32 @llvm.ctpop.i32( i32 %1 )
%3 = tail call i32 @llvm.ctpop.i32( i32 %a0 )
; ZNVER1-NEXT: popcntq (%rsi), %rcx # sched: [10:1.00]
; ZNVER1-NEXT: popcntq %rdi, %rax # sched: [3:1.00]
; ZNVER1-NEXT: orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64 *%a1
%2 = tail call i64 @llvm.ctpop.i64( i64 %1 )
%3 = tail call i64 @llvm.ctpop.i64( i64 %a0 )
;
; ZNVER1-LABEL: test_sha1msg1:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: sha1msg1 %xmm1, %xmm0
-; ZNVER1-NEXT: sha1msg1 (%rdi), %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: sha1msg1 %xmm1, %xmm0 # sched: [2:1.00]
+; ZNVER1-NEXT: sha1msg1 (%rdi), %xmm0 # sched: [9:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %2, <4 x i32> %1)
;
; ZNVER1-LABEL: test_sha1msg2:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: sha1msg2 %xmm1, %xmm0
-; ZNVER1-NEXT: sha1msg2 (%rdi), %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: sha1msg2 %xmm1, %xmm0 # sched: [1:0.50]
+; ZNVER1-NEXT: sha1msg2 (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %2, <4 x i32> %1)
;
; ZNVER1-LABEL: test_sha1nexte:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: sha1nexte %xmm1, %xmm0
-; ZNVER1-NEXT: sha1nexte (%rdi), %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: sha1nexte %xmm1, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: sha1nexte (%rdi), %xmm0 # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %2, <4 x i32> %1)
;
; ZNVER1-LABEL: test_sha1rnds4:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: sha1rnds4 $3, %xmm1, %xmm0
-; ZNVER1-NEXT: sha1rnds4 $3, (%rdi), %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: sha1rnds4 $3, %xmm1, %xmm0 # sched: [6:1.00]
+; ZNVER1-NEXT: sha1rnds4 $3, (%rdi), %xmm0 # sched: [13:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %a0, <4 x i32> %a1, i8 3)
%3 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %2, <4 x i32> %1, i8 3)
;
; ZNVER1-LABEL: test_sha256msg1:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: sha256msg1 %xmm1, %xmm0
-; ZNVER1-NEXT: sha256msg1 (%rdi), %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: sha256msg1 %xmm1, %xmm0 # sched: [2:1.00]
+; ZNVER1-NEXT: sha256msg1 (%rdi), %xmm0 # sched: [9:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %2, <4 x i32> %1)
;
; ZNVER1-LABEL: test_sha256msg2:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: sha256msg2 %xmm1, %xmm0
-; ZNVER1-NEXT: sha256msg2 (%rdi), %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: sha256msg2 %xmm1, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: sha256msg2 (%rdi), %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a2
%2 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %a0, <4 x i32> %a1)
%3 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %2, <4 x i32> %1)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovaps %xmm0, %xmm3 # sched: [1:0.50]
; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3
-; ZNVER1-NEXT: sha256rnds2 %xmm0, (%rdi), %xmm3
+; ZNVER1-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3 # sched: [4:1.00]
+; ZNVER1-NEXT: sha256rnds2 %xmm0, (%rdi), %xmm3 # sched: [11:1.00]
; ZNVER1-NEXT: vmovaps %xmm3, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x i32>, <4 x i32>* %a3
%2 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
%3 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %2, <4 x i32> %1, <4 x i32> %a2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fadd <4 x float> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fadd float %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = and <4 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
; ZNVER1-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fcmp oeq <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fcmp oeq <4 x float> %a0, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = insertelement <4 x float> undef, float %a1, i32 0
%3 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %1, <4 x float> %2, i8 0)
; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 4
%3 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %2)
; ZNVER1-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sitofp i32 %a0 to float
%2 = load i32, i32 *%a1, align 4
%3 = sitofp i32 %2 to float
; ZNVER1-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sitofp i64 %a0 to float
%2 = load i64, i64 *%a1, align 8
%3 = sitofp i64 %2 to float
; ZNVER1-NEXT: vcvtss2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtss2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %1)
%3 = load float, float *%a1, align 4
; ZNVER1-NEXT: vcvtss2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtss2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %1)
%3 = load float, float *%a1, align 4
; ZNVER1-NEXT: vcvttss2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttss2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi float %a0 to i32
%2 = load float, float *%a1, align 4
%3 = fptosi float %2 to i32
; ZNVER1-NEXT: vcvttss2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttss2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi float %a0 to i64
%2 = load float, float *%a1, align 4
%3 = fptosi float %2 to i64
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fdiv <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fdiv <4 x float> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fdiv float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fdiv float %1, %2
; ZNVER1-LABEL: test_ldmxcsr:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50]
-; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = alloca i32, align 4
%2 = bitcast i32* %1 to i8*
store i32 %a0, i32* %1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %1, <4 x float> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %1, <4 x float> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %2)
; ZNVER1-NEXT: vmovaps (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 16
%2 = fadd <4 x float> %1, %1
store <4 x float> %2, <4 x float> *%a1, align 16
; ZNVER1-LABEL: test_movhlps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
ret <4 x float> %1
}
; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast x86_mmx* %a2 to <2 x float>*
%2 = load <2 x float>, <2 x float> *%1, align 8
%3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
%2 = fadd <4 x float> %a1, %1
ret <4 x float> %2
; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast x86_mmx* %a2 to <2 x float>*
%2 = load <2 x float>, <2 x float> *%1, align 8
%3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;
; ZNVER1-LABEL: test_movmskps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmovmskps %xmm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
ret i32 %1
}
; ZNVER1-LABEL: test_movntps:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0
ret void
}
; ZNVER1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovss %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load float, float* %a0, align 1
%2 = fadd float %1, %1
store float %2, float *%a1, align 1
; ZNVER1-LABEL: test_movss_reg:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
ret <4 x float> %1
}
; ZNVER1-NEXT: vmovups (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovups %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <4 x float>, <4 x float> *%a0, align 1
%2 = fadd <4 x float> %1, %1
store <4 x float> %2, <4 x float> *%a1, align 1
;
; ZNVER1-LABEL: test_mulps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
+; ZNVER1-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fmul <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fmul <4 x float> %1, %2
;
; ZNVER1-LABEL: test_mulss:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
+; ZNVER1-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fmul float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fmul float %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = or <4 x i32> %1, %2
; ZNVER1-LABEL: test_prefetchnta:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: prefetchnta (%rdi) # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
ret void
}
; ZNVER1-NEXT: vrcpps (%rdi), %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %2)
; ZNVER1-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
; ZNVER1-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %1)
%3 = load float, float *%a1, align 4
; ZNVER1-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [12:0.50]
; ZNVER1-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %2)
; ZNVER1-LABEL: test_rsqrtss:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
-; ZNVER1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:0.50]
+; ZNVER1-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x float> undef, float %a0, i32 0
%2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %1)
%3 = load float, float *%a1, align 4
; ZNVER1-LABEL: test_sfence:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: sfence # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse.sfence()
ret void
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 0, i32 4, i32 4>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 3, i32 4, i32 4>
; ZNVER1-NEXT: vsqrtps (%rdi), %xmm1 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtps %xmm0, %xmm0 # sched: [20:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %2)
; ZNVER1-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %2)
;
; ZNVER1-LABEL: test_stmxcsr:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:0.50]
+; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
; ZNVER1-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = alloca i32, align 4
%2 = bitcast i32* %1 to i8*
call void @llvm.x86.sse.stmxcsr(i8* %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fsub <4 x float> %a0, %a1
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = fsub <4 x float> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fsub float %a0, %a1
%2 = load float, float *%a2, align 4
%3 = fsub float %1, %2
; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 4
%3 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <4 x float> %a0 to <4 x i32>
%2 = bitcast <4 x float> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fadd <2 x double> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd double %a0, %a1
%2 = load double, double *%a2, align 8
%3 = fadd double %1, %2
; ZNVER1-NEXT: vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <2 x double> %a0 to <4 x i32>
%2 = bitcast <2 x double> %a1 to <4 x i32>
%3 = and <4 x i32> %1, %2
; ZNVER1-NEXT: vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vandnpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <2 x double> %a0 to <4 x i32>
%2 = bitcast <2 x double> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
; ZNVER1-NEXT: vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
; ZNVER1-NEXT: vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fcmp oeq <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fcmp oeq <2 x double> %a0, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <2 x double> undef, double %a0, i32 0
%2 = insertelement <2 x double> undef, double %a1, i32 0
%3 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %1, <2 x double> %2, i8 0)
; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 8
%3 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %2)
; ZNVER1-NEXT: vcvtdq2pd (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtdq2pd %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%2 = sitofp <2 x i32> %1 to <2 x double>
%3 = load <4 x i32>, <4 x i32>*%a1, align 16
; ZNVER1-NEXT: vcvtdq2ps (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sitofp <4 x i32> %a0 to <4 x float>
%2 = load <4 x i32>, <4 x i32>*%a1, align 16
%3 = sitofp <4 x i32> %2 to <4 x float>
; ZNVER1-NEXT: vcvtpd2dqx (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtpd2dq %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %2)
;
; ZNVER1-LABEL: test_cvtpd2ps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [12:1.00]
-; ZNVER1-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtpd2psx (%rdi), %xmm1 # sched: [11:1.00]
+; ZNVER1-NEXT: vcvtpd2ps %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %2)
; ZNVER1-NEXT: vcvtps2dq (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvtps2dq %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %2)
;
; ZNVER1-LABEL: test_cvtps2pd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [12:1.00]
-; ZNVER1-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtps2pd (%rdi), %xmm1 # sched: [10:1.00]
+; ZNVER1-NEXT: vcvtps2pd %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
%2 = fpext <2 x float> %1 to <2 x double>
%3 = load <4 x float>, <4 x float> *%a1, align 16
; ZNVER1-NEXT: vcvtsd2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtsd2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <2 x double> undef, double %a0, i32 0
%2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %1)
%3 = load double, double *%a1, align 8
; ZNVER1-NEXT: vcvtsd2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvtsd2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <2 x double> undef, double %a0, i32 0
%2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %1)
%3 = load double, double *%a1, align 8
; ZNVER1-LABEL: test_cvtsd2ss:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50]
-; ZNVER1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptrunc double %a0 to float
%2 = load double, double *%a1, align 8
%3 = fptrunc double %2 to float
; ZNVER1-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sitofp i32 %a0 to double
%2 = load i32, i32 *%a1, align 8
%3 = sitofp i32 %2 to double
; ZNVER1-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sitofp i64 %a0 to double
%2 = load i64, i64 *%a1, align 8
%3 = sitofp i64 %2 to double
; ZNVER1-LABEL: test_cvtss2sd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
-; ZNVER1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
; ZNVER1-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fpext float %a0 to double
%2 = load float, float *%a1, align 4
%3 = fpext float %2 to double
; ZNVER1-NEXT: vcvttpd2dqx (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvttpd2dq %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi <2 x double> %a0 to <2 x i32>
%2 = shufflevector <2 x i32> %1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%3 = load <2 x double>, <2 x double> *%a1, align 16
; ZNVER1-NEXT: vcvttps2dq (%rdi), %xmm1 # sched: [12:1.00]
; ZNVER1-NEXT: vcvttps2dq %xmm0, %xmm0 # sched: [5:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi <4 x float> %a0 to <4 x i32>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = fptosi <4 x float> %2 to <4 x i32>
; ZNVER1-NEXT: vcvttsd2si (%rdi), %eax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttsd2si %xmm0, %ecx # sched: [5:1.00]
; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi double %a0 to i32
%2 = load double, double *%a1, align 8
%3 = fptosi double %2 to i32
; ZNVER1-NEXT: vcvttsd2si (%rdi), %rax # sched: [12:1.00]
; ZNVER1-NEXT: vcvttsd2si %xmm0, %rcx # sched: [5:1.00]
; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fptosi double %a0 to i64
%2 = load double, double *%a1, align 8
%3 = fptosi double %2 to i64
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdivpd %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivpd (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fdiv <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fdiv <2 x double> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vdivsd %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
; ZNVER1-NEXT: vdivsd (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fdiv double %a0, %a1
%2 = load double, double *%a2, align 8
%3 = fdiv double %1, %2
; ZNVER1-LABEL: test_lfence:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: lfence # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse2.lfence()
ret void
}
; ZNVER1-LABEL: test_mfence:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: mfence # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse2.mfence()
ret void
}
;
; ZNVER1-LABEL: test_maskmovdqu:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmaskmovdqu %xmm1, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2)
ret void
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %1, <2 x double> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %1, <2 x double> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %1, <2 x double> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vminsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %1, <2 x double> %2)
; ZNVER1-NEXT: vmovapd (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovapd %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x double>, <2 x double> *%a0, align 16
%2 = fadd <2 x double> %1, %1
store <2 x double> %2, <2 x double> *%a1, align 16
; ZNVER1-NEXT: vmovdqa (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vmovdqa %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a0, align 16
%2 = add <2 x i64> %1, %1
store <2 x i64> %2, <2 x i64> *%a1, align 16
; ZNVER1-NEXT: vmovdqu (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vmovdqu %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a0, align 1
%2 = add <2 x i64> %1, %1
store <2 x i64> %2, <2 x i64> *%a1, align 1
; ZNVER1-LABEL: test_movd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [8:0.50]
-; ZNVER1-NEXT: vmovd %edi, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovd %edi, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vmovd %xmm1, (%rsi) # sched: [1:0.50]
; ZNVER1-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vmovd %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmovd %xmm0, %eax # sched: [2:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x i32> undef, i32 %a1, i32 0
%2 = load i32, i32 *%a2
%3 = insertelement <4 x i32> undef, i32 %2, i32 0
; ZNVER1-LABEL: test_movd_64:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero sched: [8:0.50]
-; ZNVER1-NEXT: vmovq %rdi, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vmovq %rdi, %xmm1 # sched: [3:1.00]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vmovq %xmm1, (%rsi) # sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vmovq %xmm0, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmovq %xmm0, %rax # sched: [2:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <2 x i64> undef, i64 %a1, i64 0
%2 = load i64, i64 *%a2
%3 = insertelement <2 x i64> undef, i64 %2, i64 0
; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovhpd %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast x86_mmx* %a2 to double*
%2 = load double, double *%1, align 8
%3 = insertelement <2 x double> %a1, double %2, i32 1
; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovlpd %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast x86_mmx* %a2 to double*
%2 = load double, double *%1, align 8
%3 = insertelement <2 x double> %a1, double %2, i32 0
;
; ZNVER1-LABEL: test_movmskpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmovmskpd %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmovmskpd %xmm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
ret i32 %1
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vmovntdq %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <2 x i64> %a0, %a0
store <2 x i64> %1, <2 x i64> *%a1, align 16, !nontemporal !0
ret void
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovntpd %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fadd <2 x double> %a0, %a0
store <2 x double> %1, <2 x double> *%a1, align 16, !nontemporal !0
ret void
; ZNVER1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vmovq %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i64, i64* %a1, align 1
%2 = insertelement <2 x i64> zeroinitializer, i64 %1, i32 0
%3 = add <2 x i64> %a0, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
%2 = add <2 x i64> %a1, %1
ret <2 x i64> %2
; ZNVER1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [8:0.50]
; ZNVER1-NEXT: vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovsd %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load double, double* %a0, align 1
%2 = fadd double %1, %1
store double %2, double *%a1, align 1
; ZNVER1-LABEL: test_movsd_reg:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 0>
ret <2 x double> %1
}
; ZNVER1-NEXT: vmovupd (%rdi), %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vmovupd %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x double>, <2 x double> *%a0, align 1
%2 = fadd <2 x double> %1, %1
store <2 x double> %2, <2 x double> *%a1, align 1
;
; ZNVER1-LABEL: test_mulpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmulpd %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
+; ZNVER1-NEXT: vmulpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fmul <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fmul <2 x double> %1, %2
;
; ZNVER1-LABEL: test_mulsd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vmulsd %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
+; ZNVER1-NEXT: vmulsd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fmul double %a0, %a1
%2 = load double, double *%a2, align 8
%3 = fmul double %1, %2
; ZNVER1-NEXT: vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <2 x double> %a0 to <4 x i32>
%2 = bitcast <2 x double> %a1 to <4 x i32>
%3 = or <4 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackssdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpacksswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = bitcast <16 x i8> %1 to <8 x i16>
%3 = load <8 x i16>, <8 x i16> *%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackuswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = bitcast <16 x i8> %1 to <8 x i16>
%3 = load <8 x i16>, <8 x i16> *%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <16 x i8> %a0, %a1
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = add <16 x i8> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = add <4 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = add <2 x i64> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = add <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = add <8 x i16> %1, %2
; ZNVER1-NEXT: vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpand (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = and <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = and <2 x i64> %1, %2
; ZNVER1-NEXT: vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpandn (%rdi), %xmm0, %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = xor <2 x i64> %a0, <i64 -1, i64 -1>
%2 = and <2 x i64> %a1, %1
%3 = load <2 x i64>, <2 x i64> *%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpavgb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpavgw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp eq <16 x i8> %a0, %a1
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = icmp eq <16 x i8> %a0, %2
; ZNVER1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp eq <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = icmp eq <4 x i32> %a0, %2
; ZNVER1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp eq <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = icmp eq <8 x i16> %a0, %2
; ZNVER1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp sgt <16 x i8> %a0, %a1
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = icmp sgt <16 x i8> %a0, %2
; ZNVER1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp sgt <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = icmp eq <4 x i32> %a0, %2
; ZNVER1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp sgt <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = icmp sgt <8 x i16> %a0, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <8 x i16> %a0, i32 6
ret i16 %1
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <8 x i16> %a0, i16 %a1, i32 1
%2 = load i16, i16 *%a2
%3 = insertelement <8 x i16> %1, i16 %2, i32 3
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
%2 = bitcast <4 x i32> %1 to <8 x i16>
%3 = load <8 x i16>, <8 x i16> *%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxub (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminub (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %1, <16 x i8> %2)
;
; ZNVER1-LABEL: test_pmovmskb:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpmovmskb %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vpmovmskb %xmm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
ret i32 %1
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmullw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmullw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = mul <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = mul <8 x i16> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuludq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <2 x i64> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = or <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = or <2 x i64> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsadbw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpsadbw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1)
%2 = bitcast <2 x i64> %1 to <16 x i8>
%3 = load <16 x i8>, <16 x i8> *%a2, align 16
; ZNVER1-NEXT: vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
; ZNVER1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%2 = load <4 x i32>, <4 x i32> *%a1, align 16
%3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; ZNVER1-NEXT: vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [8:0.50]
; ZNVER1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 7, i32 6>
%2 = load <8 x i16>, <8 x i16> *%a1, align 16
%3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4>
; ZNVER1-NEXT: vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [8:0.50]
; ZNVER1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7>
%2 = load <8 x i16>, <8 x i16> *%a1, align 16
%3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
;
; ZNVER1-LABEL: test_pslld:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpslld %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpslld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpslld $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %1, <4 x i32> %2)
;
; ZNVER1-LABEL: test_pslldq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
ret <4 x i32> %1
}
;
; ZNVER1-LABEL: test_psllq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpsllq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsllq $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %1, <2 x i64> %2)
;
; ZNVER1-LABEL: test_psllw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpsllw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsllw $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %1, <8 x i16> %2)
;
; ZNVER1-LABEL: test_psrad:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsrad $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> %2)
;
; ZNVER1-LABEL: test_psraw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsraw $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> %2)
;
; ZNVER1-LABEL: test_psrld:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsrld $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %1, <4 x i32> %2)
;
; ZNVER1-LABEL: test_psrldq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
ret <4 x i32> %1
}
;
; ZNVER1-LABEL: test_psrlq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %1, <2 x i64> %2)
;
; ZNVER1-LABEL: test_psrlw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <16 x i8> %a0, %a1
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = sub <16 x i8> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = sub <4 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = sub <2 x i64> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = sub <8 x i16> %a0, %a1
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = sub <8 x i16> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
; ZNVER1-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
 %1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
; ZNVER1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
 %1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
; ZNVER1-NEXT: vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpxor (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = xor <2 x i64> %a0, %a1
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = xor <2 x i64> %1, %2
; ZNVER1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:0.50]
; ZNVER1-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> <i32 1, i32 2>
; ZNVER1-NEXT: vsqrtpd (%rdi), %xmm1 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtpd %xmm0, %xmm0 # sched: [20:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %2)
; ZNVER1-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
; ZNVER1-NEXT: vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fsub <2 x double> %a0, %a1
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fsub <2 x double> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vsubsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = fsub double %a0, %a1
%2 = load double, double *%a2, align 8
%3 = fsub double %1, %2
; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 8
%3 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %2)
; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> <i32 1, i32 3>
; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = shufflevector <2 x double> %1, <2 x double> %2, <2 x i32> <i32 0, i32 2>
; ZNVER1-NEXT: vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vxorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = bitcast <2 x double> %a0 to <4 x i32>
%2 = bitcast <2 x double> %a1 to <4 x i32>
%3 = xor <4 x i32> %1, %2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %1, <2 x double> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vaddsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %1, <4 x float> %2)
;
; ZNVER1-LABEL: test_haddpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhaddpd %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vhaddpd (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %1, <2 x double> %2)
;
; ZNVER1-LABEL: test_haddps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhaddps %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vhaddps (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %2)
;
; ZNVER1-LABEL: test_hsubpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhsubpd %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vhsubpd (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %1, <2 x double> %2)
;
; ZNVER1-LABEL: test_hsubps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vhsubps %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vhsubps (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %1, <4 x float> %2)
; ZNVER1-LABEL: test_lddqu:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vlddqu (%rdi), %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %a0)
ret <16 x i8> %1
}
; ZNVER1-NEXT: leaq (%rdi), %rax # sched: [1:0.25]
; ZNVER1-NEXT: movl %esi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: monitor # sched: [100:?]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse3.monitor(i8* %a0, i32 %a1, i32 %a2)
ret void
}
; ZNVER1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [8:0.50]
; ZNVER1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [8:0.50]
; ZNVER1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [8:0.50]
; ZNVER1-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
; ZNVER1-NEXT: movl %edi, %ecx # sched: [1:0.25]
; ZNVER1-NEXT: movl %esi, %eax # sched: [1:0.25]
; ZNVER1-NEXT: mwait # sched: [100:?]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse3.mwait(i32 %a0, i32 %a1)
ret void
}
; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 3>
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = fadd <2 x double> %a1, %1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
%2 = load <2 x double>, <2 x double> *%a3, align 16
%3 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %1, <2 x double> %2, <2 x double> %a2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
%2 = load <4 x float>, <4 x float> *%a3
%3 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %1, <4 x float> %2, <4 x float> %a2)
;
; ZNVER1-LABEL: test_dppd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
%2 = load <2 x double>, <2 x double> *%a2, align 16
%3 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %1, <2 x double> %2, i8 7)
;
; ZNVER1-LABEL: test_dpps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %1, <4 x float> %2, i8 7)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:0.50]
; ZNVER1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17)
%2 = load float, float *%a2
%3 = insertelement <4 x float> %1, float %2, i32 3
; ZNVER1-LABEL: test_movntdqa:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmovntdqa (%rdi), %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %a0)
ret <2 x i64> %1
}
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = bitcast <8 x i16> %1 to <16 x i8>
%3 = load <16 x i8>, <16 x i8> *%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackusdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <8 x i16> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2)
%2 = load <16 x i8>, <16 x i8> *%a3, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %1, <16 x i8> %2, <16 x i8> %a2)
;
; ZNVER1-LABEL: test_pblendw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
+; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.33]
; ZNVER1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp eq <2 x i64> %a0, %a1
%2 = sext <2 x i1> %1 to <2 x i64>
%3 = load <2 x i64>, <2 x i64>*%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrb $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrb $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <16 x i8> %a0, i32 3
%2 = extractelement <16 x i8> %a0, i32 1
store i8 %2, i8 *%a1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrd $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrd $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <4 x i32> %a0, i32 3
%2 = extractelement <4 x i32> %a0, i32 1
store i32 %2, i32 *%a1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrq $1, %xmm0, %rax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <2 x i64> %a0, i32 1
%2 = extractelement <2 x i64> %a0, i32 1
store i64 %2, i64 *%a2
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrw $3, %xmm0, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpextrw $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <8 x i16> %a0, i32 3
%2 = extractelement <8 x i16> %a0, i32 1
store i16 %2, i16 *%a1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphminposuw (%rdi), %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vphminposuw %xmm0, %xmm0 # sched: [4:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <8 x i16>, <8 x i16> *%a0, align 16
%2 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %1)
%3 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <16 x i8> %a0, i8 %a1, i32 1
%2 = load i8, i8 *%a2
%3 = insertelement <16 x i8> %1, i8 %2, i32 3
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <4 x i32> %a0, i32 %a1, i32 1
%2 = load i32, i32 *%a2
%3 = insertelement <4 x i32> %1, i32 %2, i32 3
; ZNVER1-NEXT: vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = insertelement <2 x i64> %a0, i64 %a2, i32 1
%2 = load i64, i64 *%a3
%3 = insertelement <2 x i64> %a1, i64 %2, i32 1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %1, <4 x i32> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %1, <4 x i32> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %1, <4 x i32> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %1, <4 x i32> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %1, <8 x i16> %2)
; ZNVER1-NEXT: vpmovsxbw (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = sext <8 x i8> %1 to <8 x i16>
%3 = load <8 x i8>, <8 x i8>* %a1, align 1
; ZNVER1-NEXT: vpmovsxbd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = sext <4 x i8> %1 to <4 x i32>
%3 = load <4 x i8>, <4 x i8>* %a1, align 1
; ZNVER1-NEXT: vpmovsxbq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i8> %1 to <2 x i64>
%3 = load <2 x i8>, <2 x i8>* %a1, align 1
; ZNVER1-NEXT: vpmovsxdq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxdq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i32> %1 to <2 x i64>
%3 = load <2 x i32>, <2 x i32>* %a1, align 1
; ZNVER1-NEXT: vpmovsxwd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = sext <4 x i16> %1 to <4 x i32>
%3 = load <4 x i16>, <4 x i16>* %a1, align 1
; ZNVER1-NEXT: vpmovsxwq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
%2 = sext <2 x i16> %1 to <2 x i64>
%3 = load <2 x i16>, <2 x i16>* %a1, align 1
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = zext <8 x i8> %1 to <8 x i16>
%3 = load <8 x i8>, <8 x i8>* %a1, align 1
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = zext <4 x i8> %1 to <4 x i32>
%3 = load <4 x i8>, <4 x i8>* %a1, align 1
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i8> %1 to <2 x i64>
%3 = load <2 x i8>, <2 x i8>* %a1, align 1
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i32> %1 to <2 x i64>
%3 = load <2 x i32>, <2 x i32>* %a1, align 1
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = zext <4 x i16> %1 to <4 x i32>
%3 = load <4 x i16>, <4 x i16>* %a1, align 1
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
%2 = zext <2 x i16> %1 to <2 x i64>
%3 = load <2 x i16>, <2 x i16>* %a1, align 1
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuldq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
%2 = bitcast <2 x i64> %1 to <4 x i32>
%3 = load <4 x i32>, <4 x i32> *%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulld (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = mul <4 x i32> %a0, %a1
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = mul <4 x i32> %1, %2
;
; ZNVER1-LABEL: test_ptest:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vptest %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vptest %xmm1, %xmm0 # sched: [1:1.00]
; ZNVER1-NEXT: setb %al # sched: [1:0.25]
-; ZNVER1-NEXT: vptest (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vptest (%rdi), %xmm0 # sched: [8:1.00]
; ZNVER1-NEXT: setb %cl # sched: [1:0.25]
; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
; ZNVER1-NEXT: movzbl %cl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
%2 = load <2 x i64>, <2 x i64> *%a2, align 16
%3 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %2)
;
; ZNVER1-LABEL: test_roundpd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [10:1.00]
-; ZNVER1-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundpd $7, (%rdi), %xmm1 # sched: [11:1.00]
+; ZNVER1-NEXT: vroundpd $7, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7)
%2 = load <2 x double>, <2 x double> *%a1, align 16
%3 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %2, i32 7)
;
; ZNVER1-LABEL: test_roundps:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [10:1.00]
-; ZNVER1-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vroundps $7, (%rdi), %xmm1 # sched: [11:1.00]
+; ZNVER1-NEXT: vroundps $7, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7)
%2 = load <4 x float>, <4 x float> *%a1, align 16
%3 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %2, i32 7)
;
; ZNVER1-LABEL: test_roundsd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
-; ZNVER1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [4:1.00]
+; ZNVER1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7)
%2 = load <2 x double>, <2 x double>* %a2, align 16
%3 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %2, i32 7)
;
; ZNVER1-LABEL: test_roundss:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
-; ZNVER1-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [4:1.00]
+; ZNVER1-NEXT: vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7)
%2 = load <4 x float>, <4 x float> *%a2, align 16
%3 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %2, i32 7)
; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00]
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a0, i8 %a1)
%2 = load i8, i8 *%a2
%3 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %1, i8 %2)
; ZNVER1-NEXT: crc32w %si, %edi # sched: [3:1.00]
; ZNVER1-NEXT: crc32w (%rdx), %edi # sched: [10:1.00]
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a0, i16 %a1)
%2 = load i16, i16 *%a2
%3 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %1, i16 %2)
; ZNVER1-NEXT: crc32l %esi, %edi # sched: [3:1.00]
; ZNVER1-NEXT: crc32l (%rdx), %edi # sched: [10:1.00]
; ZNVER1-NEXT: movl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a0, i32 %a1)
%2 = load i32, i32 *%a2
%3 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %1, i32 %2)
; ZNVER1-NEXT: crc32b %sil, %edi # sched: [3:1.00]
; ZNVER1-NEXT: crc32b (%rdx), %edi # sched: [10:1.00]
; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a0, i8 %a1)
%2 = load i8, i8 *%a2
%3 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %1, i8 %2)
; ZNVER1-NEXT: crc32q %rsi, %rdi # sched: [3:1.00]
; ZNVER1-NEXT: crc32q (%rdx), %rdi # sched: [10:1.00]
; ZNVER1-NEXT: movq %rdi, %rax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a0, i64 %a1)
%2 = load i64, i64 *%a2
%3 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %1, i64 %2)
; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %2, i32 7, i8 7)
; ZNVER1-NEXT: movl $7, %eax # sched: [1:0.25]
; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7)
; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %2, i8 7)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %1, <16 x i8> %2, i8 7)
;
; ZNVER1-LABEL: test_pcmpgtq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = icmp sgt <2 x i64> %a0, %a1
%2 = sext <2 x i1> %1 to <2 x i64>
%3 = load <2 x i64>, <2 x i64>*%a2, align 16
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load <2 x i64>, <2 x i64> *%a2, align 16
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
%3 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %2, i8 0)
;
; ZNVER1-LABEL: test_extrq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: extrq %xmm1, %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: extrq %xmm1, %xmm0 # sched: [2:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %a0, <16 x i8> %a1)
ret <2 x i64> %1
}
;
; ZNVER1-LABEL: test_extrqi:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: extrq $2, $3, %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: extrq $2, $3, %xmm0 # sched: [2:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a0, i8 3, i8 2)
ret <2 x i64> %1
}
;
; ZNVER1-LABEL: test_insertq:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: insertq %xmm1, %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: insertq %xmm1, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %a0, <2 x i64> %a1)
ret <2 x i64> %1
}
;
; ZNVER1-LABEL: test_insertqi:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: insertq $6, $5, %xmm1, %xmm0
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: insertq $6, $5, %xmm1, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 6)
ret <2 x i64> %1
}
;
; ZNVER1-LABEL: test_movntsd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: movntsd %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: movntsd %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a)
ret void
}
;
; ZNVER1-LABEL: test_movntss:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: movntss %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: movntss %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a)
ret void
}
; ZNVER1-NEXT: vpabsb (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsb %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
%2 = load <16 x i8>, <16 x i8> *%a1, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %2)
; ZNVER1-NEXT: vpabsd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
%2 = load <4 x i32>, <4 x i32> *%a1, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %2)
; ZNVER1-NEXT: vpabsw (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
%2 = load <8 x i16>, <8 x i16> *%a1, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.25]
; ZNVER1-NEXT: vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = shufflevector <8 x i16> %2, <8 x i16> %1, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
;
; ZNVER1-LABEL: test_phaddd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vphaddd %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vphaddd (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %1, <4 x i32> %2)
;
; ZNVER1-LABEL: test_phaddsw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vphaddsw %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vphaddsw (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %1, <8 x i16> %2)
;
; ZNVER1-LABEL: test_phaddw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vphaddw %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vphaddw (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %1, <8 x i16> %2)
;
; ZNVER1-LABEL: test_phsubd:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vphsubd %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vphsubd (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %1, <4 x i32> %2)
;
; ZNVER1-LABEL: test_phsubsw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vphsubsw %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vphsubsw (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %1, <8 x i16> %2)
;
; ZNVER1-LABEL: test_phsubw:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: vphsubw %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: vphsubw (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = bitcast <8 x i16> %1 to <16 x i8>
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %1, <8 x i16> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpshufb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1)
%2 = load <16 x i8>, <16 x i8> *%a2, align 16
%3 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %1, <16 x i8> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = load <4 x i32>, <4 x i32> *%a2, align 16
%3 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %1, <4 x i32> %2)
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT: retq # sched: [5:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = load <8 x i16>, <8 x i16> *%a2, align 16
%3 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %1, <8 x i16> %2)