/// MachineSinking - This pass performs sinking on machine instructions.
extern char &MachineSinkingID;
+ /// MachineCopyPropagationPreRegRewrite - This pass performs copy propagation
+ /// on machine instructions after register allocation but before virtual
+/// register rewriting.
+ extern char &MachineCopyPropagationPreRegRewriteID;
+
/// MachineCopyPropagation - This pass performs copy propagation on
/// machine instructions.
extern char &MachineCopyPropagationID;
void initializeMachineCSEPass(PassRegistry&);
void initializeMachineCombinerPass(PassRegistry&);
void initializeMachineCopyPropagationPass(PassRegistry&);
+void initializeMachineCopyPropagationPreRegRewritePass(PassRegistry&);
void initializeMachineDominanceFrontierPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
void initializeMachineFunctionPrinterPassPass(PassRegistry&);
initializeMachineCSEPass(Registry);
initializeMachineCombinerPass(Registry);
initializeMachineCopyPropagationPass(Registry);
+ initializeMachineCopyPropagationPreRegRewritePass(Registry);
initializeMachineDominatorTreePass(Registry);
initializeMachineFunctionPrinterPassPass(Registry);
initializeMachineLICMPass(Registry);
//
//===----------------------------------------------------------------------===//
//
-// This is an extremely simple MachineInstr-level copy propagation pass.
+// This is a simple MachineInstr-level copy forwarding pass. It may be run at
+// two places in the codegen pipeline:
+// - After register allocation but before virtual registers have been remapped
+// to physical registers.
+// - After physical register remapping.
+//
+// The optimizations done vary slightly based on whether virtual registers are
+// still present. In both cases, this pass forwards the source of COPYs to the
+// users of their destinations when doing so is legal. For example:
+//
+// %vreg1 = COPY %vreg0
+// ...
+// ... = OP %vreg1
+//
+// If
+// - the physical register assigned to %vreg0 has not been clobbered by the
+// time of the use of %vreg1
+// - the register class constraints are satisfied
+// - the COPY def is the only value that reaches OP
+// then this pass replaces the above with:
+//
+// %vreg1 = COPY %vreg0
+// ...
+// ... = OP %vreg0
+//
+// and updates the relevant state required by VirtRegMap (e.g. LiveIntervals).
+// COPYs whose LiveIntervals become dead as a result of this forwarding (i.e. if
+// all uses of %vreg1 are changed to %vreg0) are removed.
+//
+// When run with only physical registers present, this pass will also remove
+// some redundant COPYs. For example:
+//
+// %R1 = COPY %R0
+// ... // No clobber of %R1
+// %R0 = COPY %R1 <<< Removed
+//
+// or
+//
+// %R1 = COPY %R0
+// ... // No clobber of %R0
+// %R1 = COPY %R0 <<< Removed
//
//===----------------------------------------------------------------------===//
+#include "LiveDebugVariables.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "machine-cp"
STATISTIC(NumDeletes, "Number of dead copies deleted");
+STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
namespace {
typedef SmallVector<unsigned, 4> RegList;
typedef DenseMap<unsigned, RegList> SourceMap;
typedef DenseMap<unsigned, MachineInstr*> Reg2MIMap;
- class MachineCopyPropagation : public MachineFunctionPass {
+ class MachineCopyPropagation : public MachineFunctionPass,
+ private LiveRangeEdit::Delegate {
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
- const MachineRegisterInfo *MRI;
+ MachineRegisterInfo *MRI;
+ MachineFunction *MF;
+ SlotIndexes *Indexes;
+ LiveIntervals *LIS;
+ const VirtRegMap *VRM;
+  // True if this pass is being run before virtual registers are remapped to
+  // physical ones.
+ bool PreRegRewrite;
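+  // True if sub-register liveness tracking is not enabled for the current
+  // function.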
+ bool NoSubRegLiveness;
+
+ protected:
+ MachineCopyPropagation(char &ID, bool PreRegRewrite)
+ : MachineFunctionPass(ID), PreRegRewrite(PreRegRewrite) {}
public:
static char ID; // Pass identification, replacement for typeid
- MachineCopyPropagation() : MachineFunctionPass(ID) {
+ MachineCopyPropagation() : MachineCopyPropagation(ID, false) {
initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
+ if (PreRegRewrite) {
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addRequired<VirtRegMap>();
+ AU.addPreserved<VirtRegMap>();
+ AU.addPreserved<LiveDebugVariables>();
+ AU.addPreserved<LiveStacks>();
+ }
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override;
MachineFunctionProperties getRequiredProperties() const override {
+ if (PreRegRewrite)
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::NoPHIs)
+ .set(MachineFunctionProperties::Property::TracksLiveness);
return MachineFunctionProperties().set(
MachineFunctionProperties::Property::NoVRegs);
}
void ReadRegister(unsigned Reg);
void CopyPropagateBlock(MachineBasicBlock &MBB);
bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
+ unsigned getPhysReg(unsigned Reg, unsigned SubReg);
+ unsigned getPhysReg(const MachineOperand &Opnd) {
+ return getPhysReg(Opnd.getReg(), Opnd.getSubReg());
+ }
+ unsigned getFullPhysReg(const MachineOperand &Opnd) {
+ return getPhysReg(Opnd.getReg(), 0);
+ }
+ void forwardUses(MachineInstr &MI);
+ bool isForwardableRegClassCopy(const MachineInstr &Copy,
+ const MachineInstr &UseI);
+ std::tuple<unsigned, unsigned, bool>
+ checkUseSubReg(const MachineOperand &CopySrc, const MachineOperand &MOUse);
+ bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);
+ void narrowRegClass(const MachineInstr &MI, const MachineOperand &MOUse,
+ unsigned NewUseReg, unsigned NewUseSubReg);
+ void updateForwardedCopyLiveInterval(const MachineInstr &Copy,
+ const MachineInstr &UseMI,
+ unsigned OrigUseReg,
+ unsigned NewUseReg,
+ unsigned NewUseSubReg);
+ /// LiveRangeEdit callback for eliminateDeadDefs().
+ void LRE_WillEraseInstruction(MachineInstr *MI) override;
/// Candidates for deletion.
SmallSetVector<MachineInstr*, 8> MaybeDeadCopies;
SourceMap SrcMap;
bool Changed;
};
+
+ class MachineCopyPropagationPreRegRewrite : public MachineCopyPropagation {
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ MachineCopyPropagationPreRegRewrite()
+ : MachineCopyPropagation(ID, true) {
+      initializeMachineCopyPropagationPreRegRewritePass(
+          *PassRegistry::getPassRegistry());
+ }
+ };
}
char MachineCopyPropagation::ID = 0;
char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
INITIALIZE_PASS(MachineCopyPropagation, DEBUG_TYPE,
"Machine Copy Propagation Pass", false, false)
+/// We have two separate passes that are very similar, the only difference being
+/// where they are meant to be run in the pipeline. This is done for several
+/// reasons:
+/// - the two passes have different dependencies
+/// - some targets want to disable the later run of this pass, but not the
+/// earlier one (e.g. NVPTX and WebAssembly)
+/// - it allows for easier debugging via llc
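+///
+/// As an illustration of the last point, the pre-rewrite run can be exercised
+/// in isolation with something like `llc -run-pass=machine-cp-prerewrite` on
+/// MIR input.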
+
+char MachineCopyPropagationPreRegRewrite::ID = 0;
+char &llvm::MachineCopyPropagationPreRegRewriteID =
+    MachineCopyPropagationPreRegRewrite::ID;
+
+INITIALIZE_PASS_BEGIN(MachineCopyPropagationPreRegRewrite,
+ "machine-cp-prerewrite",
+ "Machine Copy Propagation Pre-Register Rewrite Pass",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_END(MachineCopyPropagationPreRegRewrite,
+ "machine-cp-prerewrite",
+ "Machine Copy Propagation Pre-Register Rewrite Pass", false,
+ false)
+
/// Remove any entry in \p Map where the register is a subregister or equal to
/// a register contained in \p Regs.
static void removeRegsFromMap(Reg2MIMap &Map, const RegList &Regs,
}
void MachineCopyPropagation::ReadRegister(unsigned Reg) {
+ // We don't track MaybeDeadCopies when running pre-VirtRegRewriter.
+ if (PreRegRewrite)
+ return;
+
// If 'Reg' is defined by a copy, the copy is no longer a candidate
// for elimination.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
return SubIdx == TRI->getSubRegIndex(PreviousDef, Def);
}
+/// Return the physical register assigned to \p Reg if it is a virtual
+/// register, otherwise return \p Reg itself.
+///
+/// If \p SubReg is 0 then return the full physical register assigned to the
+/// virtual register, ignoring subregs. If we aren't tracking sub-reg liveness
+/// then we need to use this to be more conservative with clobbers by killing
+/// all super regs and their sub reg COPYs as well. This is to prevent COPY
+/// forwarding in cases like the following:
+///
+/// %vreg2 = COPY %vreg1:sub1
+/// %vreg3 = COPY %vreg1:sub0
+/// ... = OP1 %vreg2
+/// ... = OP2 %vreg3
+///
+/// After forwarding %vreg2 (assuming this is the last use of %vreg1) and
+/// VirtRegRewriter adding kill markers we have:
+///
+/// %vreg3 = COPY %vreg1:sub0
+/// ... = OP1 %vreg1:sub1<kill>
+/// ... = OP2 %vreg3
+///
+/// If %vreg3 is assigned to a sub-reg of %vreg1, then after rewriting we have:
+///
+/// ... = OP1 R0:sub1, R0<imp-use,kill>
+/// ... = OP2 R0:sub0
+///
+/// and the use of R0 by OP2 will not have a valid definition.
+unsigned MachineCopyPropagation::getPhysReg(unsigned Reg, unsigned SubReg) {
+
+ // Physical registers cannot have subregs.
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ return Reg;
+
+ assert(PreRegRewrite && "Unexpected virtual register encountered");
+ Reg = VRM->getPhys(Reg);
+ if (SubReg && !NoSubRegLiveness)
+ Reg = TRI->getSubReg(Reg, SubReg);
+ return Reg;
+}
+
/// Remove instruction \p Copy if there exists a previous copy that copies the
/// register \p Src to the register \p Def; This may happen indirectly by
/// copying the super registers.
return true;
}
+
+/// Decide whether we should forward the destination of \p Copy to its use
+/// in \p UseI based on the register class of the Copy operands. Same-class
+/// COPYs are always accepted by this function, but cross-class COPYs are only
+/// accepted if they are forwarded to another COPY with the operand register
+/// classes reversed. For example:
+///
+/// RegClassA = COPY RegClassB // Copy parameter
+/// ...
+/// RegClassB = COPY RegClassA // UseI parameter
+///
+/// which after forwarding becomes
+///
+/// RegClassA = COPY RegClassB
+/// ...
+/// RegClassB = COPY RegClassB
+///
+/// so we have reduced the number of cross-class COPYs and potentially
+/// introduced a nop COPY that can be removed.
+bool MachineCopyPropagation::isForwardableRegClassCopy(
+ const MachineInstr &Copy, const MachineInstr &UseI) {
+ auto isCross = [&](const MachineOperand &Dst, const MachineOperand &Src) {
+ unsigned DstReg = Dst.getReg();
+ unsigned SrcPhysReg = getPhysReg(Src);
+ const TargetRegisterClass *DstRC;
+ if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ DstRC = MRI->getRegClass(DstReg);
+ unsigned DstSubReg = Dst.getSubReg();
+ if (DstSubReg)
+ SrcPhysReg = TRI->getMatchingSuperReg(SrcPhysReg, DstSubReg, DstRC);
+ } else
+ DstRC = TRI->getMinimalPhysRegClass(DstReg);
+
+ return !DstRC->contains(SrcPhysReg);
+ };
+
+ const MachineOperand &CopyDst = Copy.getOperand(0);
+ const MachineOperand &CopySrc = Copy.getOperand(1);
+
+ if (!isCross(CopyDst, CopySrc))
+ return true;
+
+ if (!UseI.isCopy())
+ return false;
+
+ assert(getFullPhysReg(UseI.getOperand(1)) == getFullPhysReg(CopyDst));
+ return !isCross(UseI.getOperand(0), CopySrc);
+}
+
+/// Check that the subregs on the copy source operand (\p CopySrc) and the use
+/// operand to be forwarded to (\p MOUse) are compatible with doing the
+/// forwarding. Also computes the new register and subregister to be used in
+/// the forwarded-to instruction.
+std::tuple<unsigned, unsigned, bool> MachineCopyPropagation::checkUseSubReg(
+ const MachineOperand &CopySrc, const MachineOperand &MOUse) {
+ unsigned NewUseReg = CopySrc.getReg();
+ unsigned NewUseSubReg;
+
+ if (TargetRegisterInfo::isPhysicalRegister(NewUseReg)) {
+    // If MOUse has a subreg index, apply it to the physical register we're
+    // going to replace MOUse with.
+ if (MOUse.getSubReg())
+ NewUseReg = TRI->getSubReg(NewUseReg, MOUse.getSubReg());
+ // If the original use subreg isn't valid on the new src reg, we can't
+ // forward it here.
+ if (!NewUseReg)
+ return std::make_tuple(0, 0, false);
+ NewUseSubReg = 0;
+ } else {
+ // %v1 = COPY %v2:sub1
+ // USE %v1:sub2
+ // The new use is %v2:sub1:sub2
+ NewUseSubReg =
+ TRI->composeSubRegIndices(CopySrc.getSubReg(), MOUse.getSubReg());
+ // Check that NewUseSubReg is valid on NewUseReg
+ if (NewUseSubReg &&
+ !TRI->getSubClassWithSubReg(MRI->getRegClass(NewUseReg), NewUseSubReg))
+ return std::make_tuple(0, 0, false);
+ }
+
+ return std::make_tuple(NewUseReg, NewUseSubReg, true);
+}
+
+/// Check that \p MI does not have implicit uses that overlap with its \p Use
+/// operand (the register being replaced), since these can sometimes be
+/// implicitly tied to other operands. For example, on AMDGPU:
+///
+/// V_MOVRELS_B32_e32 %VGPR2, %M0<imp-use>, %EXEC<imp-use>, %VGPR2_VGPR3_VGPR4_VGPR5<imp-use>
+///
+/// the %VGPR2 is implicitly tied to the larger reg operand, but we have no
+/// way of knowing we need to update the latter when updating the former.
+bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
+ const MachineOperand &Use) {
+ if (!TargetRegisterInfo::isPhysicalRegister(Use.getReg()))
+ return false;
+
+ for (const MachineOperand &MIUse : MI.uses())
+ if (&MIUse != &Use && MIUse.isReg() && MIUse.isImplicit() &&
+ TRI->regsOverlap(Use.getReg(), MIUse.getReg()))
+ return true;
+
+ return false;
+}
+
+/// Narrow the register class of the forwarded vreg so it matches any
+/// instruction constraints. \p MI is the instruction being forwarded to. \p
+/// MOUse is the operand being replaced in \p MI (which hasn't yet been updated
+/// at the time this function is called). \p NewUseReg and \p NewUseSubReg are
+/// what the \p MOUse will be changed to after forwarding.
+///
+/// If we are forwarding
+/// A:RCA = COPY B:RCB
+/// into
+/// ... = OP A:RCA
+///
+/// then we need to narrow the register class of B so that it is a subclass
+/// of RCA so that it meets the instruction register class constraints.
+void MachineCopyPropagation::narrowRegClass(const MachineInstr &MI,
+ const MachineOperand &MOUse,
+ unsigned NewUseReg,
+ unsigned NewUseSubReg) {
+ if (!TargetRegisterInfo::isVirtualRegister(NewUseReg))
+ return;
+
+ // Make sure the virtual reg class allows the subreg.
+ if (NewUseSubReg) {
+ const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
+ const TargetRegisterClass *NewUseRC =
+ TRI->getSubClassWithSubReg(CurUseRC, NewUseSubReg);
+ if (CurUseRC != NewUseRC) {
+ DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
+ << " to " << TRI->getRegClassName(NewUseRC) << "\n");
+ MRI->setRegClass(NewUseReg, NewUseRC);
+ }
+ }
+
+ unsigned MOUseOpNo = &MOUse - &MI.getOperand(0);
+ const TargetRegisterClass *InstRC =
+ TII->getRegClass(MI.getDesc(), MOUseOpNo, TRI, *MF);
+ if (InstRC) {
+ const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
+ if (NewUseSubReg)
+ InstRC = TRI->getMatchingSuperRegClass(CurUseRC, InstRC, NewUseSubReg);
+ if (!InstRC->hasSubClassEq(CurUseRC)) {
+ const TargetRegisterClass *NewUseRC =
+ TRI->getCommonSubClass(InstRC, CurUseRC);
+ DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
+ << " to " << TRI->getRegClassName(NewUseRC) << "\n");
+ MRI->setRegClass(NewUseReg, NewUseRC);
+ }
+ }
+}
+
+/// Update the LiveInterval information to reflect the destination of \p Copy
+/// being forwarded to a use in \p UseMI. \p OrigUseReg is the register being
+/// forwarded through. It should be the destination register of \p Copy and has
+/// already been replaced in \p UseMI at the point this function is called. \p
+/// NewUseReg and \p NewUseSubReg are the register and subregister being
+/// forwarded. They should be the source register of the \p Copy and should be
+/// the value of the \p UseMI operand being forwarded at the point this function
+/// is called.
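+///
+/// For illustration (hypothetical vregs), after forwarding
+///
+///   %vreg1 = COPY %vreg0
+///   ...
+///   ... = OP %vreg0      (previously OP %vreg1)
+///
+/// the live range of %vreg0 is extended down to the use in OP and the live
+/// range of %vreg1 is shrunk; if the COPY becomes dead as a result, it is
+/// deleted via LiveRangeEdit::eliminateDeadDefs().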
+void MachineCopyPropagation::updateForwardedCopyLiveInterval(
+ const MachineInstr &Copy, const MachineInstr &UseMI, unsigned OrigUseReg,
+ unsigned NewUseReg, unsigned NewUseSubReg) {
+
+ assert(TRI->isSubRegisterEq(getPhysReg(OrigUseReg, 0),
+ getFullPhysReg(Copy.getOperand(0))) &&
+ "OrigUseReg mismatch");
+ assert(TRI->isSubRegisterEq(getFullPhysReg(Copy.getOperand(1)),
+ getPhysReg(NewUseReg, 0)) &&
+ "NewUseReg mismatch");
+
+ // Extend live range starting from COPY early-clobber slot, since that
+ // is where the original src live range ends.
+ SlotIndex CopyUseIdx =
+ Indexes->getInstructionIndex(Copy).getRegSlot(true /*=EarlyClobber*/);
+ SlotIndex UseIdx = Indexes->getInstructionIndex(UseMI).getRegSlot();
+ if (TargetRegisterInfo::isVirtualRegister(NewUseReg)) {
+ LiveInterval &LI = LIS->getInterval(NewUseReg);
+ LI.extendInBlock(CopyUseIdx, UseIdx);
+ LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(NewUseSubReg);
+ for (auto &S : LI.subranges())
+ if ((S.LaneMask & UseMask).any() && S.find(CopyUseIdx))
+ S.extendInBlock(CopyUseIdx, UseIdx);
+ } else {
+ assert(NewUseSubReg == 0 && "Unexpected subreg on physical register!");
+ for (MCRegUnitIterator UI(NewUseReg, TRI); UI.isValid(); ++UI) {
+ LiveRange &LR = LIS->getRegUnit(*UI);
+ LR.extendInBlock(CopyUseIdx, UseIdx);
+ }
+ }
+
+ if (!TargetRegisterInfo::isVirtualRegister(OrigUseReg))
+ return;
+
+ LiveInterval &LI = LIS->getInterval(OrigUseReg);
+
+ // Can happen for undef uses.
+ if (LI.empty())
+ return;
+
+ SlotIndex UseIndex = Indexes->getInstructionIndex(UseMI);
+ const LiveRange::Segment *UseSeg = LI.getSegmentContaining(UseIndex);
+
+ // Only shrink if forwarded use is the end of a segment.
+ if (UseSeg->end != UseIndex.getRegSlot())
+ return;
+
+ SmallVector<MachineInstr *, 4> DeadInsts;
+ LIS->shrinkToUses(&LI, &DeadInsts);
+ if (!DeadInsts.empty()) {
+ SmallVector<unsigned, 8> NewRegs;
+ LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this)
+ .eliminateDeadDefs(DeadInsts);
+ }
+}
+
+void MachineCopyPropagation::LRE_WillEraseInstruction(MachineInstr *MI) {
+ // Remove this COPY from further consideration for forwarding.
+ ClobberRegister(getFullPhysReg(MI->getOperand(0)));
+ Changed = true;
+}
+
+/// Look for available copies whose destination register is used by \p MI and
+/// replace the use in \p MI with the copy's source register.
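+///
+/// For example (illustrative MIR):
+///
+///   %vreg1 = COPY %vreg0
+///   ...
+///   ... = OP %vreg1      ---->    ... = OP %vreg0
+///
+/// assuming the physical register assigned to %vreg0 has not been clobbered in
+/// between and the other legality checks below succeed.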
+void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
+ if (AvailCopyMap.empty())
+ return;
+
+ // Look for non-tied explicit vreg uses that have an active COPY
+ // instruction that defines the physical register allocated to them.
+ // Replace the vreg with the source of the active COPY.
+ for (MachineOperand &MOUse : MI.explicit_uses()) {
+ if (!MOUse.isReg() || MOUse.isTied())
+ continue;
+
+ unsigned UseReg = MOUse.getReg();
+ if (!UseReg)
+ continue;
+
+ if (TargetRegisterInfo::isVirtualRegister(UseReg))
+ UseReg = VRM->getPhys(UseReg);
+ else if (MI.isCall() || MI.isReturn() || MI.isInlineAsm() ||
+ MI.hasUnmodeledSideEffects() || MI.isDebugValue() || MI.isKill())
+      // Some instructions appear to have ABI register uses that are not
+      // marked as implicit operands, which can lead to forwarding registers
+      // we shouldn't, so restrict the kinds of instructions we forward
+      // physical regs into.
+ continue;
+
+ // Don't forward COPYs via non-allocatable regs since they can have
+ // non-standard semantics.
+ if (!MRI->isAllocatable(UseReg))
+ continue;
+
+ auto CI = AvailCopyMap.find(UseReg);
+ if (CI == AvailCopyMap.end())
+ continue;
+
+ MachineInstr &Copy = *CI->second;
+ MachineOperand &CopyDst = Copy.getOperand(0);
+ MachineOperand &CopySrc = Copy.getOperand(1);
+
+ // Don't forward COPYs that are already NOPs due to register assignment.
+ if (getPhysReg(CopyDst) == getPhysReg(CopySrc))
+ continue;
+
+ // FIXME: Don't handle partial uses of wider COPYs yet.
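+    // e.g. (illustrative) if the COPY defines a register pair %R0_R1 but the
+    // use below only reads %R1, we would have to rewrite the use with a
+    // subregister of the COPY source, which is not handled here.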
+ if (CopyDst.getSubReg() != 0 || UseReg != getPhysReg(CopyDst))
+ continue;
+
+ // Don't forward COPYs of non-allocatable regs unless they are constant.
+ unsigned CopySrcReg = CopySrc.getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(CopySrcReg) &&
+ !MRI->isAllocatable(CopySrcReg) && !MRI->isConstantPhysReg(CopySrcReg))
+ continue;
+
+ if (!isForwardableRegClassCopy(Copy, MI))
+ continue;
+
+ unsigned NewUseReg, NewUseSubReg;
+ bool SubRegOK;
+ std::tie(NewUseReg, NewUseSubReg, SubRegOK) =
+ checkUseSubReg(CopySrc, MOUse);
+ if (!SubRegOK)
+ continue;
+
+ if (hasImplicitOverlap(MI, MOUse))
+ continue;
+
+ DEBUG(dbgs() << "MCP: Replacing "
+ << PrintReg(MOUse.getReg(), TRI, MOUse.getSubReg())
+ << "\n with "
+ << PrintReg(NewUseReg, TRI, CopySrc.getSubReg())
+ << "\n in "
+ << MI
+ << " from "
+ << Copy);
+
+ narrowRegClass(MI, MOUse, NewUseReg, NewUseSubReg);
+
+ unsigned OrigUseReg = MOUse.getReg();
+ MOUse.setReg(NewUseReg);
+ MOUse.setSubReg(NewUseSubReg);
+
+ DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
+
+ if (PreRegRewrite)
+ updateForwardedCopyLiveInterval(Copy, MI, OrigUseReg, NewUseReg,
+ NewUseSubReg);
+ else
+ for (MachineInstr &KMI :
+ make_range(Copy.getIterator(), std::next(MI.getIterator())))
+ KMI.clearRegisterKills(NewUseReg, TRI);
+
+ ++NumCopyForwards;
+ Changed = true;
+ }
+}
+
void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
++I;
if (MI->isCopy()) {
- unsigned Def = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
-
- assert(!TargetRegisterInfo::isVirtualRegister(Def) &&
- !TargetRegisterInfo::isVirtualRegister(Src) &&
- "MachineCopyPropagation should be run after register allocation!");
+ unsigned Def = getPhysReg(MI->getOperand(0));
+ unsigned Src = getPhysReg(MI->getOperand(1));
// The two copies cancel out and the source of the first copy
// hasn't been overridden, eliminate the second one. e.g.
// %ECX<def> = COPY %EAX
// =>
// %ECX<def> = COPY %EAX
- if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
- continue;
+ if (!PreRegRewrite)
+ if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
+ continue;
+
+ forwardUses(*MI);
+
+ // Src may have been changed by forwardUses()
+ Src = getPhysReg(MI->getOperand(1));
+ unsigned DefClobber = getFullPhysReg(MI->getOperand(0));
+ unsigned SrcClobber = getFullPhysReg(MI->getOperand(1));
// If Src is defined by a previous copy, the previous copy cannot be
// eliminated.
DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
// Copy is now a candidate for deletion.
- if (!MRI->isReserved(Def))
+ // Only look for dead COPYs if we're not running just before
+ // VirtRegRewriter, since presumably these COPYs will have already been
+ // removed.
+ if (!PreRegRewrite && !MRI->isReserved(Def))
MaybeDeadCopies.insert(MI);
// If 'Def' is previously source of another copy, then this earlier copy's
// %xmm2<def> = copy %xmm0
// ...
// %xmm2<def> = copy %xmm9
- ClobberRegister(Def);
+ ClobberRegister(DefClobber);
for (const MachineOperand &MO : MI->implicit_operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
- unsigned Reg = MO.getReg();
+ unsigned Reg = getFullPhysReg(MO);
if (!Reg)
continue;
ClobberRegister(Reg);
// Remember source that's copied to Def. Once it's clobbered, then
// it's no longer available for copy propagation.
- RegList &DestList = SrcMap[Src];
- if (!is_contained(DestList, Def))
- DestList.push_back(Def);
+ RegList &DestList = SrcMap[SrcClobber];
+ if (!is_contained(DestList, DefClobber))
+ DestList.push_back(DefClobber);
continue;
}
+ // Clobber any earlyclobber regs first.
+ for (const MachineOperand &MO : MI->operands())
+ if (MO.isReg() && MO.isEarlyClobber()) {
+ unsigned Reg = getFullPhysReg(MO);
+ // If we have a tied earlyclobber, that means it is also read by this
+ // instruction, so we need to make sure we don't remove it as dead
+ // later.
+ if (MO.isTied())
+ ReadRegister(Reg);
+ ClobberRegister(Reg);
+ }
+
+ forwardUses(*MI);
+
// Not a copy.
SmallVector<unsigned, 2> Defs;
const MachineOperand *RegMask = nullptr;
RegMask = &MO;
if (!MO.isReg())
continue;
- unsigned Reg = MO.getReg();
+ unsigned Reg = getFullPhysReg(MO);
if (!Reg)
continue;
- assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
- "MachineCopyPropagation should be run after register allocation!");
-
- if (MO.isDef()) {
+ if (MO.isDef() && !MO.isEarlyClobber()) {
Defs.push_back(Reg);
continue;
} else if (MO.readsReg())
// since we don't want to trust live-in lists.
if (MBB.succ_empty()) {
for (MachineInstr *MaybeDead : MaybeDeadCopies) {
+ DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
+ MaybeDead->dump());
assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
MaybeDead->eraseFromParent();
Changed = true;
TRI = MF.getSubtarget().getRegisterInfo();
TII = MF.getSubtarget().getInstrInfo();
MRI = &MF.getRegInfo();
+ this->MF = &MF;
+ if (PreRegRewrite) {
+ Indexes = &getAnalysis<SlotIndexes>();
+ LIS = &getAnalysis<LiveIntervals>();
+ VRM = &getAnalysis<VirtRegMap>();
+ }
+ NoSubRegLiveness = !MRI->subRegLivenessEnabled();
for (MachineBasicBlock &MBB : MF)
CopyPropagateBlock(MBB);
return Changed;
}
-
cl::desc("Disable Codegen Prepare"));
static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
cl::desc("Disable Copy Propagation pass"));
+static cl::opt<bool> DisableCopyPropPreRegRewrite(
+    "disable-copyprop-prerewrite", cl::Hidden,
+    cl::desc("Disable Copy Propagation Pre-Register Rewrite pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
static cl::opt<bool> EnableImplicitNullChecks(
if (StandardID == &MachineCopyPropagationID)
return applyDisable(TargetID, DisableCopyProp);
+ if (StandardID == &MachineCopyPropagationPreRegRewriteID)
+ return applyDisable(TargetID, DisableCopyPropPreRegRewrite);
+
return TargetID;
}
// Allow targets to change the register assignments before rewriting.
addPreRewrite();
+ // Copy propagate to forward register uses and try to eliminate COPYs that
+ // were not coalesced.
+ addPass(&MachineCopyPropagationPreRegRewriteID);
+
// Finally rewrite virtual registers.
addPass(&VirtRegRewriterID);
; CHECK-LABEL: halfword:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1]
-; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1]
+; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
+; CHECK: strh [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #1]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
; CHECK-LABEL: word:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2]
-; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2]
+; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
+; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #2]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
; CHECK-LABEL: doubleword:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3]
-; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3]
+; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
+; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #3]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
; CHECK: add.2d v[[REG:[0-9]+]], v0, v1
; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1
; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1
-; Without advanced copy optimization, we end up with cross register
-; banks copies that cannot be coalesced.
-; CHECK-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
-; With advanced copy optimization, we end up with just one copy
-; to insert the computed high part into the V register.
-; CHECK-OPT-NOT: fmov
+; CHECK-NOT: fmov
; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; CHECK-NOOPT: fmov d0, [[COPY_REG3]]
-; CHECK-OPT-NOT: fmov
+; CHECK-NOT: fmov
; CHECK: ins.d v0[1], [[COPY_REG2]]
; CHECK-NEXT: ret
;
; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d
; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1
; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1
-; GENERIC-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
-; GENERIC-OPT-NOT: fmov
+; GENERIC-NOT: fmov
; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; GENERIC-NOOPT: fmov d0, [[COPY_REG3]]
-; GENERIC-OPT-NOT: fmov
+; GENERIC-NOT: fmov
; GENERIC: ins v0.d[1], [[COPY_REG2]]
; GENERIC-NEXT: ret
%add = add <2 x i64> %a, %b
define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp {
entry:
; CHECK-LABEL: t:
-; CHECK: mov x0, [[REG1:x[0-9]+]]
-; CHECK: mov x1, [[REG2:x[0-9]+]]
+; CHECK: mov [[REG2:x[0-9]+]], x3
+; CHECK: mov [[REG1:x[0-9]+]], x2
+; CHECK: mov x0, x2
+; CHECK: mov x1, x3
; CHECK: bl _foo
; CHECK: mov x0, [[REG1]]
; CHECK: mov x1, [[REG2]]
; CHECK-LABEL: test_phi:
; CHECK: mov x[[PTR:[0-9]+]], x0
-; CHECK: ldr h[[AB:[0-9]+]], [x[[PTR]]]
+; CHECK: ldr h[[AB:[0-9]+]], [x0]
; CHECK: [[LOOP:LBB[0-9_]+]]:
; CHECK: mov.16b v[[R:[0-9]+]], v[[AB]]
; CHECK: ldr h[[AB]], [x[[PTR]]]
%val = zext i1 %test to i32
; CHECK: cset {{[xw][0-9]+}}, ne
+; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]]
+; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]]
+
store i32 %val, i32* @var
call void @bar()
; Currently, the comparison is emitted again. An MSR/MRS pair would also be
; acceptable, but assuming the call preserves NZCV is not.
br i1 %test, label %iftrue, label %iffalse
-; CHECK: cmp [[LHS]], [[RHS]]
+; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]]
; CHECK: b.eq
iftrue:
define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) {
;CHECK-LABEL: test
entry:
-; A53: mov [[DATA:w[0-9]+]], w1
; A53: str q{{[0-9]+}}, {{.*}}
; A53: str q{{[0-9]+}}, {{.*}}
-; A53: str [[DATA]], {{.*}}
+; A53: str w1, {{.*}}
%0 = bitcast %struct1* %fde to i8*
tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false)
define void @test(i32 %px) {
; CHECK_LABEL: test:
; CHECK_LABEL: %entry
-; CHECK: subs
-; CHECK-NEXT: csel
+; CHECK: subs [[REG0:w[0-9]+]],
+; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]]
entry:
%sub = add nsw i32 %px, -1
%cmp = icmp slt i32 %px, 1
}
; GCN-LABEL: {{^}}call_void_func_byval_struct_kernel:
-; GCN: s_mov_b32 s33, s7
-; GCN: s_add_u32 s32, s33, 0xa00{{$}}
+; GCN: s_add_u32 s32, s7, 0xa00{{$}}
; GCN-DAG: v_mov_b32_e32 [[NINE:v[0-9]+]], 9
; GCN-DAG: v_mov_b32_e32 [[THIRTEEN:v[0-9]+]], 13
-; GCN-DAG: buffer_store_dword [[NINE]], off, s[0:3], s33 offset:8
-; GCN: buffer_store_dword [[THIRTEEN]], off, s[0:3], s33 offset:24
+; GCN-DAG: buffer_store_dword [[NINE]], off, s[0:3], s7 offset:8
+; GCN: buffer_store_dword [[THIRTEEN]], off, s[0:3], s7 offset:24
+
+; GCN: s_mov_b32 s33, s7
; GCN-DAG: s_add_u32 s32, s32, 0x800{{$}}
-; GCN-DAG: buffer_load_dword [[LOAD0:v[0-9]+]], off, s[0:3], s33 offset:8
-; GCN-DAG: buffer_load_dword [[LOAD1:v[0-9]+]], off, s[0:3], s33 offset:12
-; GCN-DAG: buffer_load_dword [[LOAD2:v[0-9]+]], off, s[0:3], s33 offset:16
-; GCN-DAG: buffer_load_dword [[LOAD3:v[0-9]+]], off, s[0:3], s33 offset:20
+; GCN-DAG: buffer_load_dword [[LOAD0:v[0-9]+]], off, s[0:3], s{{7|33}} offset:8
+; GCN-DAG: buffer_load_dword [[LOAD1:v[0-9]+]], off, s[0:3], s{{7|33}} offset:12
+; GCN-DAG: buffer_load_dword [[LOAD2:v[0-9]+]], off, s[0:3], s{{7|33}} offset:16
+; GCN-DAG: buffer_load_dword [[LOAD3:v[0-9]+]], off, s[0:3], s{{7|33}} offset:20
; GCN-DAG: buffer_store_dword [[LOAD0]], off, s[0:3], s32 offset:4{{$}}
; GCN-DAG: buffer_store_dword [[LOAD1]], off, s[0:3], s32 offset:8
; GCN-LABEL: {{^}}test_call_external_void_func_i1_signext:
; MESA: s_mov_b32 s33, s3{{$}}
+; MESA-DAG: s_mov_b32 s32, s3{{$}}
; HSA: s_mov_b32 s33, s9{{$}}
+; HSA: s_mov_b32 s32, s9{{$}}
; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i1_signext@rel32@lo+4
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i1_signext@rel32@hi+4
; GCN-NEXT: buffer_load_ubyte [[VAR:v[0-9]+]]
; HSA-NEXT: s_mov_b32 s4, s33
-; HSA-NEXT: s_mov_b32 s32, s33
; MESA-DAG: s_mov_b32 s4, s33{{$}}
-; MESA-DAG: s_mov_b32 s32, s33{{$}}
; GCN: s_waitcnt vmcnt(0)
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 1
; FIXME: load should be scheduled before getpc
; GCN-LABEL: {{^}}test_call_external_void_func_i1_zeroext:
; MESA: s_mov_b32 s33, s3{{$}}
+; MESA: s_mov_b32 s32, s3{{$}}
+; HSA: s_mov_b32 s32, s9{{$}}
; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i1_zeroext@rel32@lo+4
; GCN-NEXT: buffer_load_ubyte v0
; GCN-DAG: s_mov_b32 s4, s33{{$}}
-; GCN-DAG: s_mov_b32 s32, s33{{$}}
; GCN: s_waitcnt vmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
; GCN-LABEL: {{^}}test_call_external_void_func_i8_imm:
; MESA-DAG: s_mov_b32 s33, s3{{$}}
+; MESA: s_mov_b32 s32, s3{{$}}
; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8@rel32@lo+4
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8@rel32@hi+4
; GCN-NEXT: v_mov_b32_e32 v0, 0x7b
-; HSA-DAG: s_mov_b32 s4, s33{{$}}
-; GCN-DAG: s_mov_b32 s32, s33{{$}}
+; HSA-DAG: s_mov_b32 s4, s9{{$}}
+; HSA-DAG: s_mov_b32 s32, s9{{$}}
; GCN: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
; GCN-NEXT: s_endpgm
; FIXME: don't wait before call
; GCN-LABEL: {{^}}test_call_external_void_func_i8_signext:
-; HSA-DAG: s_mov_b32 s33, s9{{$}}
; MESA-DAG: s_mov_b32 s33, s3{{$}}
+; MESA-DAG: s_mov_b32 s32, s3{{$}}
; GCN-DAG: buffer_load_sbyte v0
; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8_signext@rel32@lo+4
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8_signext@rel32@hi+4
-; GCN-DAG: s_mov_b32 s4, s33
-; GCN-DAG: s_mov_b32 s32, s3
+; MESA-DAG: s_mov_b32 s4, s33
+; HSA-DAG: s_mov_b32 s4, s9
+; HSA-DAG: s_mov_b32 s32, s9{{$}}
; GCN: s_waitcnt vmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
; GCN-LABEL: {{^}}test_call_external_void_func_i8_zeroext:
; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; HSA-DAG: s_mov_b32 s33, s9{{$}}
+; MESA-DAG: s_mov_b32 s32, s3{{$}}
; GCN-DAG: buffer_load_ubyte v0
; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i8_zeroext@rel32@lo+4
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i8_zeroext@rel32@hi+4
-; GCN-DAG: s_mov_b32 s4, s33
-; GCN-DAG: s_mov_b32 s32, s33
+; MESA-DAG: s_mov_b32 s4, s33
+; HSA-DAG: s_mov_b32 s4, s9
+; HSA-DAG: s_mov_b32 s32, s9
; GCN: s_waitcnt vmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
; GCN-DAG: v_mov_b32_e32 v0, 0x7b{{$}}
; GCN-DAG: s_mov_b32 s4, s33
-; GCN-DAG: s_mov_b32 s32, s33
+; MESA-DAG: s_mov_b32 s32, s1
+; HSA-DAG: s_mov_b32 s32, s7
; GCN: s_swappc_b64
define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
; GCN-LABEL: {{^}}test_call_external_void_func_i16_signext:
; MESA-DAG: s_mov_b32 s33, s3{{$}}
+; MESA-DAG: s_mov_b32 s32, s3{{$}}
; GCN-DAG: buffer_load_sshort v0
; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i16_signext@rel32@lo+4
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i16_signext@rel32@hi+4
-; GCN-DAG: s_mov_b32 s4, s33
-; GCN-DAG: s_mov_b32 s32, s33
+; MESA-DAG: s_mov_b32 s4, s33
+; HSA-DAG: s_mov_b32 s4, s9
+; HSA-DAG: s_mov_b32 s32, s9
; GCN: s_waitcnt vmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
; GCN-LABEL: {{^}}test_call_external_void_func_i16_zeroext:
; MESA-DAG: s_mov_b32 s33, s3{{$}}
+; MESA-DAG: s_mov_b32 s32, s3{{$}}
; GCN-DAG: buffer_load_ushort v0
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i16_zeroext@rel32@lo+4
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i16_zeroext@rel32@hi+4
-; GCN-DAG: s_mov_b32 s4, s33
-; GCN-DAG: s_mov_b32 s32, s33
+; MESA-DAG: s_mov_b32 s4, s33
+; HSA-DAG: s_mov_b32 s4, s9
+; HSA-DAG: s_mov_b32 s32, s9
; GCN: s_waitcnt vmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
; GCN-LABEL: {{^}}test_call_external_void_func_i32_imm:
; MESA-DAG: s_mov_b32 s33, s3{{$}}
+; MESA-DAG: s_mov_b32 s32, s3{{$}}
; GCN: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i32@rel32@lo+4
; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i32@rel32@hi+4
; GCN: v_mov_b32_e32 v0, 42
-; GCN-DAG: s_mov_b32 s4, s33
-; GCN-DAG: s_mov_b32 s32, s33
+; MESA-DAG: s_mov_b32 s4, s33
+; HSA-DAG: s_mov_b32 s4, s9
+; HSA-DAG: s_mov_b32 s32, s9
; GCN: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
; GCN-NEXT: s_endpgm
}
; GCN-LABEL: {{^}}test_call_external_void_func_v32i32_i32:
-; HSA-DAG: s_mov_b32 s33, s9
-; HSA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s33, 0x100{{$}}
+; HSA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s9, 0x100{{$}}
; MESA-DAG: s_mov_b32 s33, s3{{$}}
-; MESA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s33, 0x100{{$}}
+; MESA-DAG: s_add_u32 [[SP_REG:s[0-9]+]], s3, 0x100{{$}}
; GCN-DAG: buffer_load_dword [[VAL1:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
; GCN-DAG: buffer_load_dwordx4 v[0:3], off
}
; GCN-LABEL: {{^}}test_call_external_void_func_byval_struct_i8_i32:
-; GCN-DAG: s_add_u32 [[SP:s[0-9]+]], s33, 0x400{{$}}
+; MESA-DAG: s_add_u32 [[SP:s[0-9]+]], s1, 0x400{{$}}
+; HSA-DAG: s_add_u32 [[SP:s[0-9]+]], s7, 0x400{{$}}
; GCN-DAG: v_mov_b32_e32 [[VAL0:v[0-9]+]], 3
; GCN-DAG: v_mov_b32_e32 [[VAL1:v[0-9]+]], 8
-; MESA-DAG: buffer_store_byte [[VAL0]], off, s[36:39], s33 offset:8
-; MESA-DAG: buffer_store_dword [[VAL1]], off, s[36:39], s33 offset:12
+; MESA-DAG: buffer_store_byte [[VAL0]], off, s[36:39], s1 offset:8
+; MESA-DAG: buffer_store_dword [[VAL1]], off, s[36:39], s1 offset:12
-; HSA-DAG: buffer_store_byte [[VAL0]], off, s[0:3], s33 offset:8
-; HSA-DAG: buffer_store_dword [[VAL1]], off, s[0:3], s33 offset:12
+; HSA-DAG: s_mov_b32 s33, s7
+; HSA-DAG: buffer_store_byte [[VAL0]], off, s[0:3], s{{7|33}} offset:8
+; HSA-DAG: buffer_store_dword [[VAL1]], off, s[0:3], s{{7|33}} offset:12
; GCN: s_add_u32 [[SP]], [[SP]], 0x200
-; HSA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[0:3], s33 offset:8
-; HSA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[0:3], s33 offset:12
+; HSA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[0:3], s{{7|33}} offset:8
+; HSA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[0:3], s{{7|33}} offset:12
; HSA: buffer_store_dword [[RELOAD_VAL1]], off, s[0:3], [[SP]] offset:8
; HSA: buffer_store_dword [[RELOAD_VAL0]], off, s[0:3], [[SP]] offset:4
-; MESA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[36:39], s33 offset:8
-; MESA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[36:39], s33 offset:12
+; MESA: buffer_load_dword [[RELOAD_VAL0:v[0-9]+]], off, s[36:39], s1 offset:8
+; MESA: buffer_load_dword [[RELOAD_VAL1:v[0-9]+]], off, s[36:39], s1 offset:12
; MESA: buffer_store_dword [[RELOAD_VAL1]], off, s[36:39], [[SP]] offset:8
; MESA: buffer_store_dword [[RELOAD_VAL0]], off, s[36:39], [[SP]] offset:4
; GCN: buffer_store_dword [[RELOAD_VAL1]], off, s{{\[[0-9]+:[0-9]+\]}}, [[SP]] offset:8
; GCN: buffer_store_dword [[RELOAD_VAL0]], off, s{{\[[0-9]+:[0-9]+\]}}, [[SP]] offset:4
; GCN-NEXT: s_swappc_b64
-; GCN-DAG: buffer_load_ubyte [[LOAD_OUT_VAL0:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, [[FP_REG]] offset:16
-; GCN-DAG: buffer_load_dword [[LOAD_OUT_VAL1:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, [[FP_REG]] offset:20
+; GCN-DAG: buffer_load_ubyte [[LOAD_OUT_VAL0:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s33 offset:16
+; GCN-DAG: buffer_load_dword [[LOAD_OUT_VAL1:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s33 offset:20
; GCN: s_sub_u32 [[SP]], [[SP]], 0x200
; GCN: buffer_store_byte [[LOAD_OUT_VAL0]], off
declare void @external_void_func_void() #0
; GCN-LABEL: {{^}}test_kernel_call_external_void_func_void_clobber_s30_s31_call_external_void_func_void:
-; GCN: s_mov_b32 s33, s7
; GCN: s_getpc_b64 s[34:35]
; GCN-NEXT: s_add_u32 s34, s34,
; GCN-NEXT: s_addc_u32 s35, s35,
-; GCN-NEXT: s_mov_b32 s4, s33
-; GCN-NEXT: s_mov_b32 s32, s33
+; GCN-NEXT: s_mov_b32 s4, s7
+; GCN-NEXT: s_mov_b32 s33, s7
+; GCN-NEXT: s_mov_b32 s32, s7
; GCN: s_swappc_b64 s[30:31], s[34:35]
; GCN-NEXT: s_mov_b32 s4, s33
}
; GCN-LABEL: {{^}}test_call_void_func_void_preserves_s33:
-; GCN: s_mov_b32 s34, s9
; GCN: ; def s33
; GCN-NEXT: #ASMEND
; GCN: s_getpc_b64 s[6:7]
; GCN-NEXT: s_add_u32 s6, s6, external_void_func_void@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, external_void_func_void@rel32@hi+4
-; GCN-NEXT: s_mov_b32 s4, s34
-; GCN-NEXT: s_mov_b32 s32, s34
+; GCN-NEXT: s_mov_b32 s4, s9
+; GCN-NEXT: s_mov_b32 s32, s9
; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: ; use s33
}
; GCN-LABEL: {{^}}test_call_void_func_void_preserves_v32:
-; GCN: s_mov_b32 s33, s9
; GCN: ; def v32
; GCN-NEXT: #ASMEND
; GCN: s_getpc_b64 s[6:7]
; GCN-NEXT: s_add_u32 s6, s6, external_void_func_void@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, external_void_func_void@rel32@hi+4
-; GCN-NEXT: s_mov_b32 s4, s33
-; GCN-NEXT: s_mov_b32 s32, s33
+; GCN-NEXT: s_mov_b32 s4, s9
+; GCN-NEXT: s_mov_b32 s32, s9
; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: ; use v32
; GCN-LABEL: {{^}}test_call_void_func_void_clobber_s33:
; GCN: s_mov_b32 s33, s7
+; GCN: s_mov_b32 s32, s7
; GCN: s_getpc_b64
; GCN-NEXT: s_add_u32
; GCN-NEXT: s_addc_u32
; GCN-NEXT: s_mov_b32 s4, s33
-; GCN-NEXT: s_mov_b32 s32, s33
; GCN: s_swappc_b64
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @test_call_void_func_void_clobber_s33() #0 {
; GCN: enable_sgpr_workgroup_id_z = 0
; GCN-NOT: s6
-; GCN: s_mov_b32 s33, s7
+; GCN: s_mov_b32 s4, s7
; GCN-NOT: s6
-; GCN: s_mov_b32 s4, s33
-; GCN-NOT: s6
-; GCN: s_mov_b32 s32, s33
+; GCN: s_mov_b32 s32, s7
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_x() #1 {
call void @use_workgroup_id_x()
; GCN: enable_sgpr_workgroup_id_z = 0
; GCN: s_mov_b32 s33, s8
+; GCN: s_mov_b32 s32, s8
; GCN: s_mov_b32 s4, s33
; GCN: s_mov_b32 s6, s7
-; GCN: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_y() #1 {
call void @use_workgroup_id_y()
; GCN: s_mov_b32 s33, s8
; GCN-NOT: s6
; GCN-NOT: s7
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s32, s8
; GCN-NOT: s6
; GCN-NOT: s7
-; GCN: s_mov_b32 s32, s33
+; GCN: s_mov_b32 s4, s33
; GCN-NOT: s6
; GCN-NOT: s7
; GCN: s_swappc_b64
; GCN: enable_sgpr_workgroup_id_y = 1
; GCN: enable_sgpr_workgroup_id_z = 1
-; GCN: s_mov_b32 s33, s9
-
; GCN-NOT: s6
; GCN-NOT: s7
; GCN-NOT: s8
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s4, s9
; GCN-NOT: s6
; GCN-NOT: s7
; GCN-NOT: s8
-; GCN: s_mov_b32 s32, s33
+; GCN: s_mov_b32 s32, s9
; GCN-NOT: s6
; GCN-NOT: s7
; GCN-NOT: s6
; GCN-NOT: s7
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s32, s8
; GCN-NOT: s6
; GCN-NOT: s7
-; GCN: s_mov_b32 s32, s33
+; GCN: s_mov_b32 s4, s33
; GCN-NOT: s6
; GCN-NOT: s7
; GCN: enable_sgpr_workgroup_id_y = 1
; GCN: enable_sgpr_workgroup_id_z = 1
-; GCN: s_mov_b32 s33, s9
; GCN: s_mov_b32 s6, s7
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s4, s9
; GCN: s_mov_b32 s7, s8
-; GCN: s_mov_b32 s32, s33
+; GCN: s_mov_b32 s32, s9
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_yz() #1 {
call void @use_workgroup_id_yz()
; GCN: enable_sgpr_workgroup_id_y = 0
; GCN: enable_sgpr_workgroup_id_z = 0
-; GCN-DAG: s_mov_b32 s33, s7
; GCN-DAG: v_mov_b32_e32 v0, 0x22b
; GCN-NOT: s6
-; GCN: s_mov_b32 s4, s33
+; GCN: s_mov_b32 s4, s7
; GCN-NOT: s6
-; GCN-DAG: s_mov_b32 s32, s33
+; GCN-DAG: s_mov_b32 s32, s7
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_x() #1 {
call void @other_arg_use_workgroup_id_x(i32 555)
; GCN: enable_sgpr_workgroup_id_z = 0
; GCN-DAG: s_mov_b32 s33, s8
+; GCN-DAG: s_mov_b32 s32, s8
; GCN-DAG: v_mov_b32_e32 v0, 0x22b
; GCN: s_mov_b32 s4, s33
; GCN-DAG: s_mov_b32 s6, s7
-; GCN-DAG: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_y() #1 {
call void @other_arg_use_workgroup_id_y(i32 555)
; GCN: enable_sgpr_workgroup_id_z = 1
; GCN: s_mov_b32 s33, s8
+; GCN: s_mov_b32 s32, s8
; GCN-DAG: v_mov_b32_e32 v0, 0x22b
; GCN: s_mov_b32 s4, s33
; GCN-DAG: s_mov_b32 s6, s7
-; GCN: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_z() #1 {
call void @other_arg_use_workgroup_id_z(i32 555)
; GCN: enable_sgpr_dispatch_id = 1
; GCN: enable_sgpr_flat_scratch_init = 1
-; GCN: s_mov_b32 s33, s17
; GCN: s_mov_b64 s[12:13], s[10:11]
; GCN: s_mov_b64 s[10:11], s[8:9]
; GCN: s_mov_b64 s[8:9], s[6:7]
; GCN: s_mov_b64 s[6:7], s[4:5]
-; GCN: s_mov_b32 s4, s33
-; GCN: s_mov_b32 s32, s33
+; GCN: s_mov_b32 s4, s17
+; GCN: s_mov_b32 s32, s17
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_every_sgpr_input() #1 {
call void @use_every_sgpr_input()
; GCN: s_mov_b32 s5, s32
; GCN: s_add_u32 s32, s32, 0x300
-; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
-; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
-; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
-; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
-; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
-; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]
+; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
+; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]
+; GCN: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
+
+; GCN: s_mov_b32 s6, s14
+; GCN: s_mov_b32 s7, s15
+; GCN: s_mov_b32 s8, s16
+
+; GCN: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
+; GCN: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
+; GCN: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
-; GCN-DAG: s_mov_b32 s6, [[SAVE_X]]
-; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]]
-; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]]
; GCN: s_swappc_b64
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4
; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x:
; GCN: enable_vgpr_workitem_id = 0
+; GCN: s_mov_b32 s32, s7
; GCN: s_mov_b32 s33, s7
-; GCN: s_mov_b32 s32, s33
; GCN: buffer_store_dword v0, off, s[0:3], s32 offset:8
; GCN: s_mov_b32 s4, s33
; GCN: s_swappc_b64
; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x_byval:
; GCN: enable_vgpr_workitem_id = 0
-; GCN: s_mov_b32 s33, s7
-; GCN: s_add_u32 s32, s33, 0x200{{$}}
+; GCN: s_add_u32 s32, s7, 0x200{{$}}
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
+; GCN: s_add_u32 s32, s32, 0x100{{$}}
-; GCN-DAG: s_add_u32 s32, s32, 0x100{{$}}
-; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
-; GCN: buffer_store_dword [[K]], off, s[0:3], s33 offset:4
+
+; GCN: buffer_store_dword [[K]], off, s[0:3], s7 offset:4
; GCN: buffer_store_dword v0, off, s[0:3], s32 offset:12
+; GCN: buffer_load_dword [[RELOAD_BYVAL:v[0-9]+]], off, s[0:3], s7 offset:4
-; GCN: buffer_load_dword [[RELOAD_BYVAL:v[0-9]+]], off, s[0:3], s33 offset:4
+; GCN: s_mov_b32 s33, s7
; GCN: buffer_store_dword [[RELOAD_BYVAL]], off, s[0:3], s32 offset:4{{$}}
; GCN: v_mov_b32_e32 [[RELOAD_BYVAL]],
; GCN: s_swappc_b64
; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_xyz:
; GCN: enable_vgpr_workitem_id = 2
+; GCN: s_mov_b32 s32, s7
; GCN: s_mov_b32 s33, s7
-; GCN: s_mov_b32 s32, s33
; GCN-DAG: buffer_store_dword v0, off, s[0:3], s32 offset:8
; GCN-DAG: buffer_store_dword v1, off, s[0:3], s32 offset:12
; GCN-LABEL: {{^}}kern_call_too_many_args_use_workitem_id_x_stack_yz:
; GCN: enable_vgpr_workitem_id = 2
+; GCN: s_mov_b32 s32, s7
; GCN: s_mov_b32 s33, s7
-; GCN: s_mov_b32 s32, s33
; GCN-DAG: v_mov_b32_e32 v31, v0
; GCN-DAG: buffer_store_dword v1, off, s[0:3], s32 offset:8
; Test addressing modes when the scratch base is not a frame index.
; GCN-LABEL: {{^}}store_private_offset_i8:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @store_private_offset_i8() #0 {
store volatile i8 5, i8* inttoptr (i32 8 to i8*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_i16:
-; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @store_private_offset_i16() #0 {
store volatile i16 5, i16* inttoptr (i32 8 to i16*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_i32:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @store_private_offset_i32() #0 {
store volatile i32 5, i32* inttoptr (i32 8 to i32*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_v2i32:
-; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @store_private_offset_v2i32() #0 {
store volatile <2 x i32> <i32 5, i32 10>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_v4i32:
-; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @store_private_offset_v4i32() #0 {
store volatile <4 x i32> <i32 5, i32 10, i32 15, i32 0>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
ret void
}
; GCN-LABEL: {{^}}load_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @load_private_offset_i8() #0 {
%load = load volatile i8, i8* inttoptr (i32 8 to i8*)
ret void
}
; GCN-LABEL: {{^}}sextload_private_offset_i8:
-; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s3 offset:8
define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
%load = load volatile i8, i8* inttoptr (i32 8 to i8*)
%sextload = sext i8 %load to i32
}
; GCN-LABEL: {{^}}zextload_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s3 offset:8
define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
%load = load volatile i8, i8* inttoptr (i32 8 to i8*)
%zextload = zext i8 %load to i32
}
; GCN-LABEL: {{^}}load_private_offset_i16:
-; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @load_private_offset_i16() #0 {
%load = load volatile i16, i16* inttoptr (i32 8 to i16*)
ret void
}
; GCN-LABEL: {{^}}sextload_private_offset_i16:
-; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s3 offset:8
define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
%load = load volatile i16, i16* inttoptr (i32 8 to i16*)
%sextload = sext i16 %load to i32
}
; GCN-LABEL: {{^}}zextload_private_offset_i16:
-; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s3 offset:8
define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
%load = load volatile i16, i16* inttoptr (i32 8 to i16*)
%zextload = zext i16 %load to i32
}
; GCN-LABEL: {{^}}load_private_offset_i32:
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @load_private_offset_i32() #0 {
%load = load volatile i32, i32* inttoptr (i32 8 to i32*)
ret void
}
; GCN-LABEL: {{^}}load_private_offset_v2i32:
-; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @load_private_offset_v2i32() #0 {
%load = load volatile <2 x i32>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
ret void
}
; GCN-LABEL: {{^}}load_private_offset_v4i32:
-; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
define amdgpu_kernel void @load_private_offset_v4i32() #0 {
%load = load volatile <4 x i32>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
ret void
}
; GCN-LABEL: {{^}}store_private_offset_i8_max_offset:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:4095
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s1 offset:4095
define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
store volatile i8 5, i8* inttoptr (i32 4095 to i8*)
ret void
; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus1:
; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s1 offen{{$}}
define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
store volatile i8 5, i8* inttoptr (i32 4096 to i8*)
ret void
; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus2:
; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen offset:1{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s1 offen offset:1{{$}}
define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
store volatile i8 5, i8* inttoptr (i32 4097 to i8*)
ret void
; Uses a copy instead of an or
; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]]
-; GCN: s_or_b64 [[BREAK_REG]], exec, [[COPY]]
+; GCN: s_or_b64 [[BREAK_REG]], exec, [[BREAK_REG]]
define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPT %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=CI -check-prefix=OPT %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=iceland -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPT %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=iceland -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI -check-prefix=OPTICELAND %s
; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=OPTNONE %s
; There are no stack objects, but still a private memory access. The
; shifted down to the end of the used registers.
; GCN-LABEL: {{^}}store_to_undef:
-; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
-; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
-; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
+; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s5 offen{{$}}
+; The -mcpu=iceland case doesn't copy-propagate the same as the other two opt
+; cases because the temp registers %SGPR88_SGPR89_SGPR90_SGPR91 and %SGPR93
+; are marked as non-allocatable by this subtarget.
+; OPTICELAND-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPTICELAND-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPTICELAND-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
+; OPTICELAND: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
; -O0 should assume spilling, so the input scratch resource descriptor
; should be used directly without any copies.
}
; GCN-LABEL: {{^}}store_to_inttoptr:
-; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
-; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
-; OPT: buffer_store_dword v{{[0-9]+}}, off, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offset:124{{$}}
+; OPT: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:124{{$}}
define amdgpu_kernel void @store_to_inttoptr() #0 {
store volatile i32 0, i32* inttoptr (i32 124 to i32*)
ret void
}
; GCN-LABEL: {{^}}load_from_undef:
-; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
-; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
-; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
+; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s5 offen{{$}}
define amdgpu_kernel void @load_from_undef() #0 {
%ld = load volatile i32, i32* undef
ret void
}
; GCN-LABEL: {{^}}load_from_inttoptr:
-; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
-; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
-; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s5{{$}}
-; OPT: buffer_load_dword v{{[0-9]+}}, off, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offset:124{{$}}
+; OPT: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s5 offset:124{{$}}
define amdgpu_kernel void @load_from_inttoptr() #0 {
%ld = load volatile i32, i32* inttoptr (i32 124 to i32*)
ret void
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN-LABEL: {{^}}vgpr:
-; GCN: v_mov_b32_e32 v1, v0
-; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
-; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
+; GCN-DAG: v_mov_b32_e32 v1, v0
+; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
; GCN: s_waitcnt expcnt(0)
+; GCN: v_add_f32_e32 v0, 1.0, v0
; GCN-NOT: s_endpgm
define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
bb:
}
; GCN-LABEL: {{^}}both:
-; GCN: v_mov_b32_e32 v1, v0
-; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
-; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
-; GCN-DAG: s_add_i32 s0, s3, 2
+; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
+; GCN-DAG: v_mov_b32_e32 v1, v0
; GCN-DAG: s_mov_b32 s1, s2
-; GCN: s_mov_b32 s2, s3
; GCN: s_waitcnt expcnt(0)
+; GCN: v_add_f32_e32 v0, 1.0, v0
+; GCN-DAG: s_add_i32 s0, s3, 2
+; GCN-DAG: s_mov_b32 s2, s3
; GCN-NOT: s_endpgm
define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
bb:
%pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
%oldval = extractvalue { i32, i1 } %pair, 0
-; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
+; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0
+; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0]
; CHECK-ARMV7: cmp [[OLDVAL]], r1
; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]]
; CHECK-ARMV7: dmb ish
; CHECK-ARMV7: dmb ish
; CHECK-ARMV7: bx lr
-; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
+; CHECK-T2: mov r[[ADDR:[0-9]+]], r0
+; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0]
; CHECK-T2: cmp [[OLDVAL]], r1
; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]]
; CHECK-T2: dmb ish
; CHECK-APPLE: beq
; CHECK-APPLE: mov r0, #16
; CHECK-APPLE: malloc
-; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8]
+; CHECK-APPLE: strb r{{.*}}, [r0, #8]
; CHECK-APPLE: ble
; CHECK-APPLE: mov r8, [[ID]]
; MMR3: subu16 $5, $[[T19]], $[[T20]]
; MMR6: move $[[T0:[0-9]+]], $7
-; MMR6: sw $[[T0]], 8($sp)
+; MMR6: sw $7, 8($sp)
; MMR6: move $[[T1:[0-9]+]], $5
; MMR6: sw $4, 12($sp)
; MMR6: lw $[[T2:[0-9]+]], 48($sp)
ret double %r
; CHECK: @foo3
-; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]]
+; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]]
+; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]]
; CHECK: xsmaddmdp
; CHECK: xsmaddadp
}
; CHECK-DAG: mr [[REG:[0-9]+]], 3
; CHECK-DAG: li 0, 1076
-; CHECK: stw [[REG]],
+; CHECK-DAG: stw 3,
; CHECK: #APP
; CHECK: sc
;CHECK-LABEL: straight_test:
; test1 may have been merged with entry
;CHECK: mr [[TAGREG:[0-9]+]], 3
-;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK: andi. {{[0-9]+}}, [[TAGREG:[0-9]+]], 1
;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
;CHECK-NEXT: # %test2
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
; HARD-NEXT: std %o0, [%sp+96]
; HARD-NEXT: st %o1, [%sp+92]
; HARD-NEXT: mov %i0, %o2
-; HARD-NEXT: mov %o0, %o3
+; HARD-NEXT: mov %i1, %o3
; HARD-NEXT: mov %o1, %o4
-; HARD-NEXT: mov %o0, %o5
+; HARD-NEXT: mov %i1, %o5
; HARD-NEXT: call floatarg
; HARD: std %f0, [%i4]
; SOFT: st %i0, [%sp+104]
; CHECK-LABEL: test_load_add_i32
; CHECK: membar
-; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
-; CHECK: cas [%o0], [[V]], [[U]]
+; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
+; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
+; CHECK: cas [%o0], [[V]], [[V2]]
; CHECK: membar
define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
entry:
define i32 @b_to_bx(i32 %value) {
; CHECK-LABEL: b_to_bx:
; DISABLE: push {r7, lr}
-; CHECK: cmp r1, #49
+; CHECK: cmp r0, #49
; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]]
; ENABLE: push {r7, lr}
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: imull %edx, %edx
+; CHECK-NEXT: imull %ecx, %edx
; CHECK-NEXT: imull %eax, %ecx
; CHECK-NEXT: imull %eax, %eax
; CHECK-NEXT: addl %edx, %eax
; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]]
; CHECK-DAG: movl %[[r2]], 4(%esp)
-; CHECK-DAG: movl %[[r1]], (%esp)
+; CHECK-DAG: movl %edx, (%esp)
; CHECK: movl %esp, %[[reg:[^ ]*]]
; CHECK: pushl %[[reg]]
; CHECK: calll _addrof_i64
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: packuswb %xmm1, %xmm2
; SSE2-NEXT: packuswb %xmm10, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: psrld $1, %xmm12
; SSE2-NEXT: pand %xmm0, %xmm12
; SSE2-NEXT: movdqu %xmm7, (%rax)
; SSE2-NEXT: movdqu %xmm11, (%rax)
; SSE2-NEXT: movdqu %xmm13, (%rax)
-; SSE2-NEXT: movdqu %xmm1, (%rax)
+; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8:
; CHECK-NEXT: movq %rdx, %r14
; CHECK-NEXT: movq %rsi, %r15
; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: vmovaps (%rbx), %ymm0
+; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps (%r15), %ymm1
+; CHECK-NEXT: vmovaps (%rsi), %ymm1
; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps (%r14), %ymm2
+; CHECK-NEXT: vmovaps (%rdx), %ymm2
; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
; CHECK-NEXT: callq dummy
; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $112, %rsp
; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: vmovups (%rbx), %zmm0
+; CHECK-NEXT: vmovups (%rdi), %zmm0
; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill
; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1
-; CHECK-NEXT: vmovaps %zmm1, (%rbx)
+; CHECK-NEXT: vmovaps %zmm1, (%rdi)
; CHECK-NEXT: callq _Print__512
; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload
; CHECK-NEXT: callq _Print__512
; KNL_X32-NEXT: movl %edi, (%esp)
; KNL_X32-NEXT: calll _test11
; KNL_X32-NEXT: movl %eax, %ebx
-; KNL_X32-NEXT: movzbl %bl, %eax
+; KNL_X32-NEXT: movzbl %al, %eax
; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: movl %edi, (%esp)
; KNL-NEXT: kmovw %esi, %k0
; KNL-NEXT: kshiftlw $7, %k0, %k2
; KNL-NEXT: kshiftrw $15, %k2, %k2
-; KNL-NEXT: kmovw %k2, %eax
; KNL-NEXT: kshiftlw $6, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $1, %k0, %k0
; KNL-NEXT: kshiftrw $1, %k0, %k0
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kshiftlw $7, %k1, %k1
+; KNL-NEXT: kshiftlw $7, %k2, %k1
; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kshiftlw $7, %k1, %k2
; SKX-NEXT: kshiftrw $15, %k2, %k2
-; SKX-NEXT: kmovd %k2, %eax
; SKX-NEXT: kshiftlw $6, %k1, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kmovd %k1, %ecx
; SKX-NEXT: vpmovm2q %k0, %zmm0
-; SKX-NEXT: kmovd %ecx, %k0
-; SKX-NEXT: vpmovm2q %k0, %zmm1
+; SKX-NEXT: vpmovm2q %k1, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kshiftlb $1, %k0, %k0
; SKX-NEXT: kshiftrb $1, %k0, %k0
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: kshiftlb $7, %k1, %k1
+; SKX-NEXT: kshiftlb $7, %k2, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper
; AVX512BW-NEXT: kmovd %esi, %k0
; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
-; AVX512BW-NEXT: kmovd %k2, %eax
; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %ecx
; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
-; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
+; AVX512BW-NEXT: kshiftlw $7, %k2, %k1
; AVX512BW-NEXT: korw %k1, %k0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
-; AVX512DQ-NEXT: kmovw %k2, %eax
; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
-; AVX512DQ-NEXT: kmovw %k1, %ecx
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: kmovw %ecx, %k0
-; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
+; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
-; AVX512DQ-NEXT: kmovw %eax, %k1
-; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
+; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
; AVX512DQ-NEXT: korb %k1, %k0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
; AVX512F-32-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %esi, %eax
+; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: shrl $30, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; AVX512F-32-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm1
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %esi, %eax
+; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: shrl $31, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; AVX512F-32-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %esi, %eax
+; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: shrl $30, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; AVX512F-32-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm1
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %esi, %eax
+; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: shrl $31, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-SSSE3-NEXT: pslld $31, %xmm0
; SSE2-SSSE3-NEXT: psrad $31, %xmm0
; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-SSSE3-NEXT: psllw $15, %xmm0
; SSE2-SSSE3-NEXT: psraw $15, %xmm0
; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-SSSE3-NEXT: pslld $31, %xmm0
; SSE2-SSSE3-NEXT: psrad $31, %xmm0
; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-SSSE3-NEXT: psllw $15, %xmm0
; SSE2-SSSE3-NEXT: psraw $15, %xmm0
; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1]
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1]
; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-LABEL: test_negative_zero_1:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: cvtss2sd %xmm2, %xmm4
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: movaps %xmm2, %xmm6
-; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
; SSE-NEXT: movaps {{.*#+}} xmm7
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm7, %xmm2
; SSE-NEXT: orps %xmm0, %xmm4
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; SSE-NEXT: andps %xmm7, %xmm0
; SSE-NEXT: cvtss2sd %xmm3, %xmm3
; SSE-NEXT: andps %xmm8, %xmm3
; SSE-NEXT: orps %xmm6, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE-NEXT: andps %xmm5, %xmm1
; SSE-NEXT: xorps %xmm6, %xmm6
; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
; SSE: # BB#0:
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: addss %xmm2, %xmm2
+; SSE-NEXT: addss %xmm0, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm0
; SSE-NEXT: mulss %xmm1, %xmm1
; SSE-LABEL: complex_square_f64:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: addsd %xmm2, %xmm2
+; SSE-NEXT: addsd %xmm0, %xmm2
; SSE-NEXT: mulsd %xmm1, %xmm2
; SSE-NEXT: mulsd %xmm0, %xmm0
; SSE-NEXT: mulsd %xmm1, %xmm1
; SSE-LABEL: complex_mul_f64:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: mulsd %xmm0, %xmm4
; SSE-NEXT: mulsd %xmm1, %xmm0
; X64: # BB#0: # %entry
; X64-NEXT: movq %rdi, %rcx
; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rdx
; X64-NEXT: shrq $12, %rdx
; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039
; CHECK-LABEL: @test_fmaxf
; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1
; CHECK-LABEL: @test_fmax
; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1
; CHECK-LABEL: @test_intrinsic_fmaxf
; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1
; CHECK-LABEL: @test_intrinsic_fmax
; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1
; CHECK-LABEL: @test_fminf
; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1
; CHECK-LABEL: @test_fmin
; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1
; CHECK-LABEL: @test_intrinsic_fminf
; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1
; CHECK-LABEL: @test_intrinsic_fmin
; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp)
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp)
; SSE-LABEL: not_a_hsub_2:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: subss %xmm3, %xmm2
; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: subss %xmm3, %xmm0
; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3]
; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE-NEXT: subss %xmm4, %xmm1
; SSE-LABEL: not_a_hsub_3:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE-NEXT: subsd %xmm2, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: subsd %xmm0, %xmm2
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-LABEL: test5_undef:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $48, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
+; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $16, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
+; CHECK-LIBCALL-NEXT: movzwl 4(%rdi), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi
; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
; CHECK-I686-NEXT: movaps %xmm0, %xmm1
-; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; CHECK-I686-NEXT: movss %xmm1, (%esp)
; CHECK-I686-NEXT: calll __gnu_f2h_ieee
; CHECK-I686-NEXT: movw %ax, %si
; CHECK-NEXT: fstpt (%esp)
; CHECK-NEXT: calll _ceil
; CHECK-NEXT: fld %st(0)
+; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fistpl %st(0)
; CHECK-NEXT: ## InlineAsm End
call void @foo()
; CHECK-LABEL: bar:
; CHECK: callq foo
- ; CHECK-NEXT: movl %eax, %r15d
+ ; CHECK-NEXT: movl %edi, %r15d
call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X)
ret void
}
; X64-LABEL: print_framealloc_from_fp:
; X64: movq %rcx, %[[parent_fp:[a-z]+]]
-; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx
+; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx
; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]]
; X64: movq %[[str]], %rcx
; X64: callq printf
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %esi
; X32-NEXT: movl %esi, %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: pushl %esi
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
-; X32-NEXT: movl %edi, %ebx
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edi, %eax
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rcx, %r11
; X64-NEXT: adcq %rdi, %rbp
; X64-NEXT: setb %bl
; X64-NEXT: movzbl %bl, %ebx
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r11, %r12
-; X64-NEXT: movq %r11, %r8
+; X64-NEXT: movq %rcx, %r12
+; X64-NEXT: movq %rcx, %r8
; X64-NEXT: addq %rax, %r12
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rbp, %r12
; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: movq 16(%rsi), %rax
; X64-NEXT: movq %rsi, %r13
-; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rbx, %r11
; X64-NEXT: movq %r8, %rax
; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %r9, %rax
; X64-NEXT: adcq %rcx, %rax
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rdi, %rax
-; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rdx, %rax
; X64-NEXT: adcq %rcx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq 32(%r13), %rax
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: addq %r9, %rax
+; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, %r9
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %r15, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %rsi, %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %rcx, %r11
+; X64-NEXT: addq %rbx, %r11
; X64-NEXT: adcq %rsi, %rbp
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: setb %bl
; X64-NEXT: adcq %rbx, %r10
; X64-NEXT: movq %rcx, %rdx
; X64-NEXT: movq %rcx, %r12
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r9, %rdx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r11, %r8
-; X64-NEXT: adcq %r8, %r15
+; X64-NEXT: adcq %r11, %r15
; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rax, %r14
; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %r12
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r10, %rbp
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rsi, %rbx
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r12
; X64-NEXT: movq %r10, %rbx
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r10, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbp, %r8
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r13
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
; X64-NEXT: adcq %rdx, %r10
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r11, %rbp
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rdi, %rbx
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r14
; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: adcq $0, %r9
; X64-NEXT: adcq $0, %r10
; X64-NEXT: movq %rbp, %rsi
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: adcq $0, %r15
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbx, %r8
; X64-NEXT: movq %rcx, %r14
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r10, %rdi
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %r11, %rbx
; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq $0, %r14
; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r13, %rbx
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r13, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: addq %r8, %rcx
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r13, %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
; X64-NEXT: mulq %r13
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %r10, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rcx, %rdi
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r10
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rsi, %rdi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
-; X64-NEXT: addq %rbx, %r12
+; X64-NEXT: addq %rax, %r12
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
-; X64-NEXT: adcq %r14, %r15
+; X64-NEXT: adcq %rdx, %r15
; X64-NEXT: addq %rdi, %r12
; X64-NEXT: adcq %rcx, %r15
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: addq %r9, %rbp
+; X64-NEXT: addq %rax, %rbp
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rsi, %rbp
; X64-NEXT: movq 88(%rsi), %rax
; X64-NEXT: movq %rsi, %r9
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq %r8, %r10
; X64-NEXT: addq %rbx, %rsi
; X64-NEXT: adcq %rbp, %r10
-; X64-NEXT: movq %r9, %rdi
-; X64-NEXT: movq 64(%rdi), %r13
+; X64-NEXT: movq 64(%r9), %r13
; X64-NEXT: movq %r13, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq 72(%rdi), %r9
+; X64-NEXT: movq 72(%r9), %r9
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r15
; X64-NEXT: movq %r12, %rcx
-; X64-NEXT: addq %r15, %rcx
-; X64-NEXT: adcq %r11, %r8
+; X64-NEXT: addq %rax, %rcx
+; X64-NEXT: adcq %rdx, %r8
; X64-NEXT: addq %rbp, %rcx
; X64-NEXT: adcq %rbx, %r8
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
; X64-NEXT: setb %r10b
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdi, %r12
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %r8, %r12
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: imulq %rcx, %rdi
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r12, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r12
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rdi, %rdx
; X64-NEXT: movq 104(%rbp), %r8
; X64-NEXT: movq 8(%rsi), %rbp
; X64-NEXT: movq %r15, %rax
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8
; X64-NEXT: movq %r11, %rax
; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rbp, %r14
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp
; X64-NEXT: adcq %rbx, %rsi
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: movq %r10, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r10, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r13
; X64-NEXT: movq %rax, %r10
; X64-NEXT: movq %r15, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: # kill: %RAX<kill>
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r15
-; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r10, %r15
; X64-NEXT: adcq %r13, %rdx
; X64-NEXT: addq %rbp, %r15
; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rax, %r11
-; X64-NEXT: addq %r11, %r10
-; X64-NEXT: adcq %r14, %r13
+; X64-NEXT: addq %rax, %r10
+; X64-NEXT: adcq %rdx, %r13
; X64-NEXT: addq %rbp, %r10
; X64-NEXT: adcq %rsi, %r13
; X64-NEXT: addq %r8, %r10
; X64-NEXT: movq 16(%rsi), %r8
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %rcx, %r9
-; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %r12
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %rbp, %r11
+; X64-NEXT: addq %rax, %r11
; X64-NEXT: adcq %rdx, %r14
; X64-NEXT: addq %r9, %r11
; X64-NEXT: adcq %rbx, %r14
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: imulq %rdi, %rcx
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: mulq %rdx
; X64-NEXT: addq %rcx, %rdx
; X64-NEXT: imulq %r8, %rsi
; X64-NEXT: addq %rsi, %rdx
; SSE2-LABEL: mul_v16i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm2, %xmm1
; SSE2-LABEL: mul_v16i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-LABEL: mul_v32i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-LABEL: mul_v32i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm2, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-LABEL: mul_v64i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm6, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; SSE2-LABEL: mul_v64i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm4, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
; SSE2-NEXT: psraw $8, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: pmullw %xmm8, %xmm9
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: packuswb %xmm9, %xmm0
; SSE2-NEXT: movdqa %xmm5, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: pmullw %xmm9, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm6, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm7, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmullw %xmm9, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm9, %xmm0
; CHECK-LABEL: pow_wrapper:
; CHECK: # BB#0:
; CHECK-NEXT: movapd %xmm0, %xmm1
-; CHECK-NEXT: mulsd %xmm1, %xmm1
+; CHECK-NEXT: mulsd %xmm0, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0
; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0
; SSE-NEXT: cvtps2pd %xmm0, %xmm0
; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE-NEXT: fldl -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3],xmm9[3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0]
; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
-; CHECK-NEXT: vmovaps %xmm15, %xmm1
-; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
+; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
+; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9
; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
-; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
+; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8
; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0
; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %xmm9, (%rsp)
+; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq foo
; SSE41-LABEL: test14:
; SSE41: ## BB#0: ## %vector.ph
; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1]
; MCU-NEXT: jne .LBB0_1
; MCU-NEXT: # BB#2:
; MCU-NEXT: addl $8, %edx
-; MCU-NEXT: movl %edx, %eax
-; MCU-NEXT: movl (%eax), %eax
+; MCU-NEXT: movl (%edx), %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB0_1:
; MCU-NEXT: addl $8, %eax
; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue
; CHECK: movl %ecx, %eax
-; CHECK: cmpl %edx, %eax
+; CHECK: cmpl %edx, %ecx
; CHECK: jge LBB1_2
; CHECK: pushl %eax
; CHECK: movl $4092, %eax
; SSE: # BB#0:
; SSE-NEXT: rsqrtss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: mulss %xmm2, %xmm2
+; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm2
; SSE-NEXT: addss {{.*}}(%rip), %xmm2
; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
; SSE: # BB#0:
; SSE-NEXT: rsqrtps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: mulps %xmm2, %xmm2
+; SSE-NEXT: mulps %xmm1, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: addps {{.*}}(%rip), %xmm2
; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
; SSE-NEXT: rsqrtps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
; SSE-NEXT: movaps %xmm3, %xmm2
-; SSE-NEXT: mulps %xmm2, %xmm2
+; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
; SSE-NEXT: addps %xmm0, %xmm2
; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: rsqrtps %xmm1, %xmm5
; SSE-NEXT: movaps %xmm5, %xmm3
-; SSE-NEXT: mulps %xmm3, %xmm3
+; SSE-NEXT: mulps %xmm5, %xmm3
; SSE-NEXT: mulps %xmm1, %xmm3
; SSE-NEXT: addps %xmm0, %xmm3
; SSE-NEXT: mulps %xmm4, %xmm3
; SSE2-NEXT: testb $1, %dil
; SSE2-NEXT: jne .LBB62_1
; SSE2-NEXT: # BB#2:
-; SSE2-NEXT: movaps %xmm2, %xmm1
-; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT: retq
; SSE2-NEXT: .LBB62_1:
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: testb $1, %dil
; SSE41-NEXT: jne .LBB62_1
; SSE41-NEXT: # BB#2:
-; SSE41-NEXT: movaps %xmm2, %xmm1
-; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE41-NEXT: retq
; SSE41-NEXT: .LBB62_1:
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: testb $1, %dil
; SSE2-NEXT: jne .LBB63_1
; SSE2-NEXT: # BB#2:
-; SSE2-NEXT: movapd %xmm2, %xmm1
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE2-NEXT: retq
; SSE2-NEXT: .LBB63_1:
; SSE2-NEXT: addsd %xmm0, %xmm1
; SSE41-NEXT: testb $1, %dil
; SSE41-NEXT: jne .LBB63_1
; SSE41-NEXT: # BB#2:
-; SSE41-NEXT: movapd %xmm2, %xmm1
-; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE41-NEXT: retq
; SSE41-NEXT: .LBB63_1:
; SSE41-NEXT: addsd %xmm0, %xmm1
; X32-LABEL: test4:
; X32: # BB#0: # %entry
; X32-NEXT: movaps %xmm0, %xmm2
-; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; X32-NEXT: addss %xmm1, %xmm0
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X32-NEXT: subss %xmm1, %xmm2
; X64-LABEL: test4:
; X64: # BB#0: # %entry
; X64-NEXT: movaps %xmm0, %xmm2
-; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; X64-NEXT: addss %xmm1, %xmm0
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X64-NEXT: subss %xmm1, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm4
; CHECK-NEXT: Lcfi11:
; CHECK-NEXT: .cfi_offset %rbx, -16
; CHECK-NEXT: movl %edi, %ebx
-; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp)
; CHECK-NEXT: callq _baz
; CHECK-NEXT: Ltmp6:
; CHECK-NEXT: callq _bar
; CHECK: .byte 1
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 4
-; CHECK-NEXT: .short 6
+; CHECK-NEXT: .short 5
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
; CHECK: .byte 1
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 4
-; CHECK-NEXT: .short 3
+; CHECK-NEXT: .short 4
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
; CHECK: Ltmp2-_test2
gc "statepoint-example" {
; CHECK-LABEL: back_to_back_deopt
; The exact stores don't matter, but there need to be three stack slots created
-; CHECK-DAG: movl %ebx, 12(%rsp)
-; CHECK-DAG: movl %ebp, 8(%rsp)
-; CHECK-DAG: movl %r14d, 4(%rsp)
+; CHECK-DAG: movl %edi, 12(%rsp)
+; CHECK-DAG: movl %esi, 8(%rsp)
+; CHECK-DAG: movl %edx, 4(%rsp)
; CHECK: callq
; CHECK-DAG: movl %ebx, 12(%rsp)
; CHECK-DAG: movl %ebp, 8(%rsp)
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-LABEL: fptoui_4f32_to_4i32:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm2[1],xmm3[1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
; SSE-LABEL: uitofp_2i64_to_4f32:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
+; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB39_1
; SSE-NEXT: # BB#2:
; SSE-LABEL: uitofp_4i64_to_4f32_undef:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
+; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB41_1
; SSE-NEXT: # BB#2:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm3
-; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE42-NEXT: pcmpgtq %xmm0, %xmm3
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm3, %xmm0
; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; X32: # BB#0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm2
; X32-NEXT: psllw $5, %xmm1
-; X32-NEXT: movdqa %xmm2, %xmm3
+; X32-NEXT: movdqa %xmm0, %xmm3
; X32-NEXT: psllw $4, %xmm3
; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm3
-; X32-NEXT: paddb %xmm3, %xmm3
+; X32-NEXT: paddb %xmm2, %xmm3
; X32-NEXT: paddb %xmm1, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64: # BB#0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $5, %xmm1
-; X64-NEXT: movdqa %xmm2, %xmm3
+; X64-NEXT: movdqa %xmm0, %xmm3
; X64-NEXT: psllw $4, %xmm3
; X64-NEXT: pand {{.*}}(%rip), %xmm3
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
-; X64-NEXT: paddb %xmm3, %xmm3
+; X64-NEXT: paddb %xmm2, %xmm3
; X64-NEXT: paddb %xmm1, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: psubd %xmm2, %xmm3
+; SSE41-NEXT: psubd %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3
; SSE41-NEXT: movaps %xmm3, %xmm0
; SSE2-LABEL: test_div7_16i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-LABEL: test_rem7_16i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: paddb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: paddw %xmm4, %xmm4
+; SSE41-NEXT: paddw %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm6
; SSE41-NEXT: psllw $8, %xmm6
; SSE41-NEXT: movdqa %xmm3, %xmm5
; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: por %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddw %xmm1, %xmm1
+; SSE41-NEXT: paddw %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE41-NEXT: psubb %xmm3, %xmm2
; SSE41-NEXT: psllw $5, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: psllw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
-; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: paddb %xmm5, %xmm5
+; SSE41-NEXT: paddb %xmm4, %xmm5
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: psllw $5, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: psrlw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
; SSE41-LABEL: constant_rotate_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSSE3-LABEL: sext_16i8_to_8i32:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7]
; SSSE3-LABEL: sext_16i8_to_16i32:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; SSSE3-LABEL: sext_16i8_to_4i64:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSE2-LABEL: sext_16i8_to_8i64:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
; SSSE3-NEXT: psrad $31, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: psllw $15, %xmm0
; SSE2-NEXT: psraw $15, %xmm0
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: psllw $15, %xmm0
; SSSE3-NEXT: psraw $15, %xmm0
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psraw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrlw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psrlw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrlw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psllw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psllw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm1, %xmm1
+; SSE41-NEXT: paddb %xmm2, %xmm1
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: paddb %xmm2, %xmm2
+; SSE41-NEXT: paddb %xmm1, %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE-LABEL: PR22377:
; SSE: # BB#0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
; SSE-NEXT: addps %xmm0, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-LABEL: mul_add_const_v4i64_v4i32:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE41-NEXT: retq
;
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE41-NEXT: retq
;
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: movdqa %xmm3, %xmm12
; SSE2-NEXT: pcmpgtb %xmm7, %xmm12
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm9, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtb %xmm8, %xmm12
+; SSE2-NEXT: pcmpgtb %xmm3, %xmm12
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm9, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: movdqa %xmm3, %xmm12
; SSE2-NEXT: pcmpgtd %xmm7, %xmm12
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm9, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm12
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
; SSE2-LABEL: test122:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test124:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test126:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test128:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm8, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
; SSE2-LABEL: test154:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test156:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test158:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test160:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%eax)
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
; X86-SSE2-NEXT: movups %xmm0, (%eax)
; X86-SSE2-NEXT: movss %xmm2, 16(%eax)
; X86-SSE2-NEXT: movaps %xmm2, %xmm0
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm2[1],xmm0[1]
; X86-SSE2-NEXT: movss %xmm0, 24(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm2, 20(%eax)
; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%eax)
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Compare the arguments and jump to exit.
; No prologue needed.
; ENABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; ENABLE-NEXT: cmpl %esi, [[ARG0CPY]]
+; ENABLE-NEXT: cmpl %esi, %edi
; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Compare the arguments and jump to exit.
; After the prologue is set.
; DISABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; DISABLE-NEXT: cmpl %esi, [[ARG0CPY]]
+; DISABLE-NEXT: cmpl %esi, %edi
; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.