/// MachineSinking - This pass performs sinking on machine instructions.
extern char &MachineSinkingID;
+ /// MachineCopyPropagationPreRegRewrite - This pass performs copy propagation
+ /// on machine instructions after register allocation but before virtual
+ /// register rewriting.
+ extern char &MachineCopyPropagationPreRegRewriteID;
+
/// MachineCopyPropagation - This pass performs copy propagation on
/// machine instructions.
extern char &MachineCopyPropagationID;
void initializeMachineCSEPass(PassRegistry&);
void initializeMachineCombinerPass(PassRegistry&);
void initializeMachineCopyPropagationPass(PassRegistry&);
+void initializeMachineCopyPropagationPreRegRewritePass(PassRegistry&);
void initializeMachineDominanceFrontierPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
void initializeMachineFunctionPrinterPassPass(PassRegistry&);
initializeMachineCSEPass(Registry);
initializeMachineCombinerPass(Registry);
initializeMachineCopyPropagationPass(Registry);
+ initializeMachineCopyPropagationPreRegRewritePass(Registry);
initializeMachineDominatorTreePass(Registry);
initializeMachineFunctionPrinterPassPass(Registry);
initializeMachineLICMPass(Registry);
//
//===----------------------------------------------------------------------===//
//
-// This is an extremely simple MachineInstr-level copy propagation pass.
+// This is a simple MachineInstr-level copy forwarding pass. It may be run at
+// two places in the codegen pipeline:
+// - After register allocation but before virtual registers have been remapped
+// to physical registers.
+// - After physical register remapping.
+//
+// The optimizations done vary slightly based on whether virtual registers are
+// still present. In both cases, this pass forwards the source of COPYs to the
+// users of their destinations when doing so is legal. For example:
+//
+// %vreg1 = COPY %vreg0
+// ...
+// ... = OP %vreg1
+//
+// If
+// - the physical register assigned to %vreg0 has not been clobbered by the
+// time of the use of %vreg1
+// - the register class constraints are satisfied
+// - the COPY def is the only value that reaches OP
+// then this pass replaces the above with:
+//
+// %vreg1 = COPY %vreg0
+// ...
+// ... = OP %vreg0
+//
+// and updates the relevant state required by VirtRegMap (e.g. LiveIntervals).
+// COPYs whose LiveIntervals become dead as a result of this forwarding (i.e. if
+// all uses of %vreg1 are changed to %vreg0) are removed.
+//
+// When run with only physical registers, this pass will also remove some
+// redundant COPYs. For example:
+//
+// %R1 = COPY %R0
+// ... // No clobber of %R1
+// %R0 = COPY %R1 <<< Removed
+//
+// or
+//
+// %R1 = COPY %R0
+// ... // No clobber of %R0
+// %R1 = COPY %R0 <<< Removed
//
//===----------------------------------------------------------------------===//
+#include "LiveDebugVariables.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#define DEBUG_TYPE "machine-cp"
STATISTIC(NumDeletes, "Number of dead copies deleted");
+STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
+DEBUG_COUNTER(FwdCounter, "machine-cp-fwd",
+ "Controls which register COPYs are forwarded");
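+// With a build that has debug counters enabled, individual forwardings can be
+// bisected from the command line, e.g. (hypothetical invocation):
+//   llc -debug-counter=machine-cp-fwd-skip=10,machine-cp-fwd-count=1 ...
+// which skips the first 10 forwarding opportunities, performs the next one,
+// and suppresses the rest.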
namespace {
using SourceMap = DenseMap<unsigned, RegList>;
using Reg2MIMap = DenseMap<unsigned, MachineInstr *>;
- class MachineCopyPropagation : public MachineFunctionPass {
+ class MachineCopyPropagation : public MachineFunctionPass,
+ private LiveRangeEdit::Delegate {
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
- const MachineRegisterInfo *MRI;
+ MachineRegisterInfo *MRI;
+ MachineFunction *MF;
+ SlotIndexes *Indexes;
+ LiveIntervals *LIS;
+ const VirtRegMap *VRM;
+  // True if this pass is being run before virtual registers are remapped to
+  // physical ones.
+ bool PreRegRewrite;
+ bool NoSubRegLiveness;
+
+ protected:
+ MachineCopyPropagation(char &ID, bool PreRegRewrite)
+ : MachineFunctionPass(ID), PreRegRewrite(PreRegRewrite) {}
public:
static char ID; // Pass identification, replacement for typeid
- MachineCopyPropagation() : MachineFunctionPass(ID) {
+ MachineCopyPropagation() : MachineCopyPropagation(ID, false) {
initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
+ if (PreRegRewrite) {
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addRequired<VirtRegMap>();
+ AU.addPreserved<VirtRegMap>();
+ AU.addPreserved<LiveDebugVariables>();
+ AU.addPreserved<LiveStacks>();
+ }
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override;
MachineFunctionProperties getRequiredProperties() const override {
+ if (PreRegRewrite)
+ return MachineFunctionProperties()
+ .set(MachineFunctionProperties::Property::NoPHIs)
+ .set(MachineFunctionProperties::Property::TracksLiveness);
return MachineFunctionProperties().set(
MachineFunctionProperties::Property::NoVRegs);
}
void ReadRegister(unsigned Reg);
void CopyPropagateBlock(MachineBasicBlock &MBB);
bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
+ unsigned getPhysReg(unsigned Reg, unsigned SubReg);
+ unsigned getPhysReg(const MachineOperand &Opnd) {
+ return getPhysReg(Opnd.getReg(), Opnd.getSubReg());
+ }
+ unsigned getFullPhysReg(const MachineOperand &Opnd) {
+ return getPhysReg(Opnd.getReg(), 0);
+ }
+ void forwardUses(MachineInstr &MI);
+ bool isForwardableRegClassCopy(const MachineInstr &Copy,
+ const MachineInstr &UseI);
+ std::tuple<unsigned, unsigned, bool>
+ checkUseSubReg(const MachineOperand &CopySrc, const MachineOperand &MOUse);
+ bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);
+ void narrowRegClass(const MachineInstr &MI, const MachineOperand &MOUse,
+ unsigned NewUseReg, unsigned NewUseSubReg);
+ void updateForwardedCopyLiveInterval(const MachineInstr &Copy,
+ const MachineInstr &UseMI,
+ bool UseIsEarlyClobber,
+ unsigned OrigUseReg,
+ unsigned NewUseReg,
+ unsigned NewUseSubReg);
+ /// LiveRangeEdit callback for eliminateDeadDefs().
+ void LRE_WillEraseInstruction(MachineInstr *MI) override;
/// Candidates for deletion.
SmallSetVector<MachineInstr*, 8> MaybeDeadCopies;
+ SmallVector<MachineInstr*, 8> ShrunkDeadInsts;
/// Def -> available copies map.
Reg2MIMap AvailCopyMap;
bool Changed;
};
+ class MachineCopyPropagationPreRegRewrite : public MachineCopyPropagation {
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ MachineCopyPropagationPreRegRewrite()
+ : MachineCopyPropagation(ID, true) {
+      initializeMachineCopyPropagationPreRegRewritePass(
+          *PassRegistry::getPassRegistry());
+ }
+ };
} // end anonymous namespace
char MachineCopyPropagation::ID = 0;
INITIALIZE_PASS(MachineCopyPropagation, DEBUG_TYPE,
"Machine Copy Propagation Pass", false, false)
+/// We have two separate passes that are very similar, the only difference being
+/// where they are meant to be run in the pipeline. This is done for several
+/// reasons:
+/// - the two passes have different dependencies
+/// - some targets want to disable the later run of this pass, but not the
+/// earlier one (e.g. NVPTX and WebAssembly)
+/// - it allows for easier debugging via llc
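+///
+/// For example, the pre-rewrite run can be exercised in isolation with
+/// something like (hypothetical invocation on a MIR input):
+///   llc -run-pass=machine-cp-prerewrite foo.mir
+/// while -disable-copyprop-prerewrite removes it from the normal pipeline.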
+
+char MachineCopyPropagationPreRegRewrite::ID = 0;
+char &llvm::MachineCopyPropagationPreRegRewriteID =
+    MachineCopyPropagationPreRegRewrite::ID;
+
+INITIALIZE_PASS_BEGIN(MachineCopyPropagationPreRegRewrite,
+ "machine-cp-prerewrite",
+ "Machine Copy Propagation Pre-Register Rewrite Pass",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_END(MachineCopyPropagationPreRegRewrite,
+ "machine-cp-prerewrite",
+ "Machine Copy Propagation Pre-Register Rewrite Pass", false,
+ false)
+
/// Remove any entry in \p Map where the register is a subregister or equal to
/// a register contained in \p Regs.
static void removeRegsFromMap(Reg2MIMap &Map, const RegList &Regs,
}
void MachineCopyPropagation::ReadRegister(unsigned Reg) {
+ // We don't track MaybeDeadCopies when running pre-VirtRegRewriter.
+ if (PreRegRewrite)
+ return;
+
// If 'Reg' is defined by a copy, the copy is no longer a candidate
// for elimination.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
return SubIdx == TRI->getSubRegIndex(PreviousDef, Def);
}
+/// Return the physical register assigned to \p Reg if it is a virtual
+/// register, otherwise just return \p Reg itself.
+///
+/// If \p SubReg is 0 then return the full physical register assigned to the
+/// virtual register, ignoring subregs. If we aren't tracking sub-reg liveness
+/// then we use this to be more conservative with clobbers, killing all super
+/// regs and their sub reg COPYs as well. This is to prevent COPY forwarding in
+/// cases like the following:
+///
+/// %vreg2 = COPY %vreg1:sub1
+/// %vreg3 = COPY %vreg1:sub0
+/// ... = OP1 %vreg2
+/// ... = OP2 %vreg3
+///
+/// After forwarding %vreg2 (assuming this is the last use of %vreg1) and
+/// VirtRegRewriter adding kill markers we have:
+///
+/// %vreg3 = COPY %vreg1:sub0
+/// ... = OP1 %vreg1:sub1<kill>
+/// ... = OP2 %vreg3
+///
+/// If %vreg3 is assigned to a sub-reg of %vreg1, then after rewriting we have:
+///
+/// ... = OP1 R0:sub1, R0<imp-use,kill>
+/// ... = OP2 R0:sub0
+///
+/// and the use of R0 by OP2 will not have a valid definition.
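+///
+/// For example (hypothetical assignment), if %vreg1 above is assigned the
+/// register pair R0_R1, then getPhysReg(%vreg1, sub0) returns R0 when sub-reg
+/// liveness is being tracked, but the full R0_R1 when it is not.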
+unsigned MachineCopyPropagation::getPhysReg(unsigned Reg, unsigned SubReg) {
+
+ // Physical registers cannot have subregs.
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ return Reg;
+
+ assert(PreRegRewrite && "Unexpected virtual register encountered");
+ Reg = VRM->getPhys(Reg);
+ if (SubReg && !NoSubRegLiveness)
+ Reg = TRI->getSubReg(Reg, SubReg);
+ return Reg;
+}
+
/// Remove instruction \p Copy if there exists a previous copy that copies the
/// register \p Src to the register \p Def; This may happen indirectly by
/// copying the super registers.
return true;
}
+
+/// Decide whether we should forward the destination of \p Copy to its use in
+/// \p UseI based on the register class of the Copy operands. Same-class
+/// COPYs are always accepted by this function, but cross-class COPYs are only
+/// accepted if they are forwarded to another COPY with the operand register
+/// classes reversed. For example:
+///
+/// RegClassA = COPY RegClassB // Copy parameter
+/// ...
+/// RegClassB = COPY RegClassA // UseI parameter
+///
+/// which after forwarding becomes
+///
+/// RegClassA = COPY RegClassB
+/// ...
+/// RegClassB = COPY RegClassB
+///
+/// so we have reduced the number of cross-class COPYs and potentially
+/// introduced a nop COPY that can be removed.
+bool MachineCopyPropagation::isForwardableRegClassCopy(
+ const MachineInstr &Copy, const MachineInstr &UseI) {
+ auto isCross = [&](const MachineOperand &Dst, const MachineOperand &Src) {
+ unsigned DstReg = Dst.getReg();
+ unsigned SrcPhysReg = getPhysReg(Src);
+ const TargetRegisterClass *DstRC;
+ if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
+ DstRC = MRI->getRegClass(DstReg);
+ unsigned DstSubReg = Dst.getSubReg();
+ if (DstSubReg)
+ SrcPhysReg = TRI->getMatchingSuperReg(SrcPhysReg, DstSubReg, DstRC);
+ } else
+ DstRC = TRI->getMinimalPhysRegClass(DstReg);
+
+ return !DstRC->contains(SrcPhysReg);
+ };
+
+ const MachineOperand &CopyDst = Copy.getOperand(0);
+ const MachineOperand &CopySrc = Copy.getOperand(1);
+
+ if (!isCross(CopyDst, CopySrc))
+ return true;
+
+ if (!UseI.isCopy())
+ return false;
+
+ assert(getFullPhysReg(UseI.getOperand(1)) == getFullPhysReg(CopyDst));
+ return !isCross(UseI.getOperand(0), CopySrc);
+}
+
+/// Check that the subregs on the copy source operand (\p CopySrc) and the use
+/// operand to be forwarded to (\p MOUse) are compatible with doing the
+/// forwarding. Also computes the new register and subregister to be used in
+/// the forwarded-to instruction.
+std::tuple<unsigned, unsigned, bool> MachineCopyPropagation::checkUseSubReg(
+ const MachineOperand &CopySrc, const MachineOperand &MOUse) {
+ unsigned NewUseReg = CopySrc.getReg();
+ unsigned NewUseSubReg;
+
+ if (TargetRegisterInfo::isPhysicalRegister(NewUseReg)) {
+    // If MOUse has a subreg index, we need to apply it to the new physical
+    // reg we're going to replace it with.
+ if (MOUse.getSubReg())
+ NewUseReg = TRI->getSubReg(NewUseReg, MOUse.getSubReg());
+ // If the original use subreg isn't valid on the new src reg, we can't
+ // forward it here.
+ if (!NewUseReg)
+ return std::make_tuple(0, 0, false);
+ NewUseSubReg = 0;
+ } else {
+ // %v1 = COPY %v2:sub1
+ // USE %v1:sub2
+ // The new use is %v2:sub1:sub2
+ NewUseSubReg =
+ TRI->composeSubRegIndices(CopySrc.getSubReg(), MOUse.getSubReg());
+ // Check that NewUseSubReg is valid on NewUseReg
+ if (NewUseSubReg &&
+ !TRI->getSubClassWithSubReg(MRI->getRegClass(NewUseReg), NewUseSubReg))
+ return std::make_tuple(0, 0, false);
+ }
+
+ return std::make_tuple(NewUseReg, NewUseSubReg, true);
+}
+
+/// Check that \p MI does not have implicit uses that overlap with its \p Use
+/// operand (the register being replaced), since these can sometimes be
+/// implicitly tied to other operands. For example, on AMDGPU:
+///
+/// V_MOVRELS_B32_e32 %VGPR2, %M0<imp-use>, %EXEC<imp-use>, %VGPR2_VGPR3_VGPR4_VGPR5<imp-use>
+///
+/// the %VGPR2 is implicitly tied to the larger reg operand, but we have no
+/// way of knowing we need to update the latter when updating the former.
+bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
+ const MachineOperand &Use) {
+ if (!TargetRegisterInfo::isPhysicalRegister(Use.getReg()))
+ return false;
+
+ for (const MachineOperand &MIUse : MI.uses())
+ if (&MIUse != &Use && MIUse.isReg() && MIUse.isImplicit() &&
+ TRI->regsOverlap(Use.getReg(), MIUse.getReg()))
+ return true;
+
+ return false;
+}
+
+/// Narrow the register class of the forwarded vreg so it matches any
+/// instruction constraints. \p MI is the instruction being forwarded to. \p
+/// MOUse is the operand being replaced in \p MI (which hasn't yet been updated
+/// at the time this function is called). \p NewUseReg and \p NewUseSubReg are
+/// what \p MOUse will be changed to after forwarding.
+///
+/// If we are forwarding
+/// A:RCA = COPY B:RCB
+/// into
+/// ... = OP A:RCA
+///
+/// then we need to narrow the register class of B so that it is a subclass
+/// of RCA so that it meets the instruction register class constraints.
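+///
+/// For example (hypothetical classes), if RCB is GPR32 and RCA is its
+/// subclass GPR32common, B's class is narrowed to the common subclass of the
+/// two, i.e. GPR32common, before the forwarded use is rewritten.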
+void MachineCopyPropagation::narrowRegClass(const MachineInstr &MI,
+ const MachineOperand &MOUse,
+ unsigned NewUseReg,
+ unsigned NewUseSubReg) {
+ if (!TargetRegisterInfo::isVirtualRegister(NewUseReg))
+ return;
+
+ // Make sure the virtual reg class allows the subreg.
+ if (NewUseSubReg) {
+ const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
+ const TargetRegisterClass *NewUseRC =
+ TRI->getSubClassWithSubReg(CurUseRC, NewUseSubReg);
+ if (CurUseRC != NewUseRC) {
+ DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
+ << " to " << TRI->getRegClassName(NewUseRC) << "\n");
+ MRI->setRegClass(NewUseReg, NewUseRC);
+ }
+ }
+
+ unsigned MOUseOpNo = &MOUse - &MI.getOperand(0);
+ const TargetRegisterClass *InstRC =
+ TII->getRegClass(MI.getDesc(), MOUseOpNo, TRI, *MF);
+ if (InstRC) {
+ const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
+ if (NewUseSubReg)
+ InstRC = TRI->getMatchingSuperRegClass(CurUseRC, InstRC, NewUseSubReg);
+ if (!InstRC->hasSubClassEq(CurUseRC)) {
+ const TargetRegisterClass *NewUseRC =
+ TRI->getCommonSubClass(InstRC, CurUseRC);
+ DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
+ << " to " << TRI->getRegClassName(NewUseRC) << "\n");
+ MRI->setRegClass(NewUseReg, NewUseRC);
+ }
+ }
+}
+
+/// Update the LiveInterval information to reflect the destination of \p Copy
+/// being forwarded to a use in \p UseMI. \p OrigUseReg is the register being
+/// forwarded through. It should be the destination register of \p Copy and has
+/// already been replaced in \p UseMI at the point this function is called. \p
+/// NewUseReg and \p NewUseSubReg are the register and subregister being
+/// forwarded. They should be the source register of the \p Copy and should be
+/// the value of the \p UseMI operand being forwarded at the point this function
+/// is called. \p UseIsEarlyClobber is true if the use being re-written is in
+/// the EarlyClobber slot index (as opposed to the register slot of most
+/// register operands).
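+///
+/// For example (hypothetical snippet), after forwarding
+///   %vreg1 = COPY %vreg0
+///   ...
+///   ... = OP %vreg0    ; was OP %vreg1
+/// %vreg0's live range is extended from the COPY's early-clobber slot to the
+/// OP's use slot, and %vreg1's interval is shrunk if OP was its last use.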
+void MachineCopyPropagation::updateForwardedCopyLiveInterval(
+ const MachineInstr &Copy, const MachineInstr &UseMI, bool UseIsEarlyClobber,
+ unsigned OrigUseReg, unsigned NewUseReg, unsigned NewUseSubReg) {
+
+ assert(TRI->isSubRegisterEq(getPhysReg(OrigUseReg, 0),
+ getFullPhysReg(Copy.getOperand(0))) &&
+ "OrigUseReg mismatch");
+ assert(TRI->isSubRegisterEq(getFullPhysReg(Copy.getOperand(1)),
+ getPhysReg(NewUseReg, 0)) &&
+ "NewUseReg mismatch");
+
+ // Extend live range starting from COPY early-clobber slot, since that
+ // is where the original src live range ends.
+ SlotIndex CopyUseIdx =
+ Indexes->getInstructionIndex(Copy).getRegSlot(true /*=EarlyClobber*/);
+ SlotIndex UseEndIdx =
+ Indexes->getInstructionIndex(UseMI).getRegSlot(UseIsEarlyClobber);
+ if (TargetRegisterInfo::isVirtualRegister(NewUseReg)) {
+ LiveInterval &LI = LIS->getInterval(NewUseReg);
+ LI.extendInBlock(CopyUseIdx, UseEndIdx);
+ LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(NewUseSubReg);
+ for (auto &S : LI.subranges())
+ if ((S.LaneMask & UseMask).any() && S.find(CopyUseIdx))
+ S.extendInBlock(CopyUseIdx, UseEndIdx);
+ } else {
+ assert(NewUseSubReg == 0 && "Unexpected subreg on physical register!");
+ for (MCRegUnitIterator UI(NewUseReg, TRI); UI.isValid(); ++UI) {
+ LiveRange &LR = LIS->getRegUnit(*UI);
+ LR.extendInBlock(CopyUseIdx, UseEndIdx);
+ }
+ }
+
+ if (!TargetRegisterInfo::isVirtualRegister(OrigUseReg))
+ return;
+
+  // Shrink the live-range for the old use reg if the forwarded use was its
+ // last use.
+ LiveInterval &OrigUseLI = LIS->getInterval(OrigUseReg);
+
+ // Can happen for undef uses.
+ if (OrigUseLI.empty())
+ return;
+
+ SlotIndex CopyDefIdx = Indexes->getInstructionIndex(Copy).getRegSlot();
+ const LiveRange::Segment *OrigUseSeg =
+ OrigUseLI.getSegmentContaining(CopyDefIdx);
+
+ // Only shrink if forwarded use is the end of a segment.
+ if (OrigUseSeg->end != UseEndIdx)
+ return;
+
+ LIS->shrinkToUses(&OrigUseLI, &ShrunkDeadInsts);
+}
+
+void MachineCopyPropagation::LRE_WillEraseInstruction(MachineInstr *MI) {
+ Changed = true;
+}
+
+/// Look for available copies whose destination register is used by \p MI and
+/// replace the use in \p MI with the copy's source register.
+void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
+ // We can't generally forward uses after virtual registers have been renamed
+ // because some targets generate code that has implicit dependencies on the
+ // physical register numbers. For example, in PowerPC, when spilling
+ // condition code registers, the following code pattern is generated:
+ //
+ // %CR7 = COPY %CR0
+ // %R6 = MFOCRF %CR7
+ // %R6 = RLWINM %R6, 29, 31, 31
+ //
+ // where the shift amount in the RLWINM instruction depends on the source
+ // register number of the MFOCRF instruction. If we were to forward %CR0 to
+ // the MFOCRF instruction, the shift amount would no longer be correct.
+ //
+ // FIXME: It may be possible to define a target hook that checks the register
+ // class or user opcode and allows some cases, but prevents cases like the
+ // above from being broken to enable later register copy forwarding.
+ if (!PreRegRewrite)
+ return;
+
+ if (AvailCopyMap.empty())
+ return;
+
+ // Look for non-tied explicit vreg uses that have an active COPY
+ // instruction that defines the physical register allocated to them.
+ // Replace the vreg with the source of the active COPY.
+ for (MachineOperand &MOUse : MI.explicit_uses()) {
+    // Don't forward into undef use operands, since doing so can cause
+    // problems with the machine verifier: it doesn't treat undef reads as
+    // reads, so we can end up with a live range that ends on an undef read,
+    // leading to an error that the live range doesn't end on a read of the
+    // live range register.
+ if (!MOUse.isReg() || MOUse.isTied() || MOUse.isUndef() || MOUse.isDef())
+ continue;
+
+ unsigned UseReg = MOUse.getReg();
+ if (!UseReg)
+ continue;
+
+    // See the comment above the !PreRegRewrite check regarding why forwarding
+    // cannot change physical registers.
+ if (!TargetRegisterInfo::isVirtualRegister(UseReg))
+ continue;
+ else {
+ // FIXME: Don't forward COPYs to a use that is on an instruction that
+ // re-defines the same virtual register. This leads to machine
+ // verification failures because of a bug in the greedy
+ // allocator/verifier. The bug is that just after greedy regalloc, we can
+ // end up with code that looks like:
+ //
+ // %vreg1<def> = ...
+ // ...
+ // ... = %vreg1
+ // ...
+ // %vreg1<def> = %vreg1
+ // ...
+ //
+ // verifyLiveInterval() accepts this code as valid since it sees the
+ // second def as part of the same live interval component because
+ // ConnectedVNInfoEqClasses::Classify() sees this second def as a
+ // "two-addr" redefinition, even though the def and source operands are
+ // not tied.
+ //
+ // If we replace just the second use of %vreg1 in the above code, then we
+ // end up with:
+ //
+ // ...
+ // %vreg1<def> = ...
+ // ...
+ // ... = %vreg1
+ // ...
+ // %vreg1<def> = *%vreg2*
+ // ...
+ //
+ // verifyLiveInterval() now rejects this code since it sees these two def
+ // live ranges as being separate components. To get rid of this
+ // restriction on forwarding, regalloc greedy would need to be fixed to
+ // avoid generating code like the first snippet above, as well as the
+ // verifier being fixed to reject such code.
+ if (llvm::any_of(MI.defs(), [UseReg](MachineOperand &Def) {
+ if (!Def.isReg() || !Def.isDef() || Def.getReg() != UseReg)
+ return false;
+ // Only tied use/def operands or a subregister def without the undef
+ // flag should result in connected liveranges.
+ if (Def.isTied())
+ return false;
+ if (Def.getSubReg() != 0 && Def.isUndef())
+ return false;
+ return true;
+ }))
+ continue;
+ }
+
+ UseReg = VRM->getPhys(UseReg);
+
+ // Don't forward COPYs via non-allocatable regs since they can have
+ // non-standard semantics.
+ if (!MRI->isAllocatable(UseReg))
+ continue;
+
+ auto CI = AvailCopyMap.find(UseReg);
+ if (CI == AvailCopyMap.end())
+ continue;
+
+ MachineInstr &Copy = *CI->second;
+ MachineOperand &CopyDst = Copy.getOperand(0);
+ MachineOperand &CopySrc = Copy.getOperand(1);
+
+ // Don't forward COPYs that are already NOPs due to register assignment.
+ if (getPhysReg(CopyDst) == getPhysReg(CopySrc))
+ continue;
+
+ // FIXME: Don't handle partial uses of wider COPYs yet.
+ if (CopyDst.getSubReg() != 0 || UseReg != getPhysReg(CopyDst))
+ continue;
+
+ // Don't forward COPYs of non-allocatable regs unless they are constant.
+ unsigned CopySrcReg = CopySrc.getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(CopySrcReg) &&
+ !MRI->isAllocatable(CopySrcReg) && !MRI->isConstantPhysReg(CopySrcReg))
+ continue;
+
+ if (!isForwardableRegClassCopy(Copy, MI))
+ continue;
+
+ unsigned NewUseReg, NewUseSubReg;
+ bool SubRegOK;
+ std::tie(NewUseReg, NewUseSubReg, SubRegOK) =
+ checkUseSubReg(CopySrc, MOUse);
+ if (!SubRegOK)
+ continue;
+
+ if (hasImplicitOverlap(MI, MOUse))
+ continue;
+
+ if (!DebugCounter::shouldExecute(FwdCounter))
+ continue;
+
+ DEBUG(dbgs() << "MCP: Replacing "
+ << PrintReg(MOUse.getReg(), TRI, MOUse.getSubReg())
+ << "\n with "
+ << PrintReg(NewUseReg, TRI, CopySrc.getSubReg())
+ << "\n in "
+ << MI
+ << " from "
+ << Copy);
+
+ narrowRegClass(MI, MOUse, NewUseReg, NewUseSubReg);
+
+ unsigned OrigUseReg = MOUse.getReg();
+ MOUse.setReg(NewUseReg);
+ MOUse.setSubReg(NewUseSubReg);
+
+ DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
+
+ if (PreRegRewrite)
+ updateForwardedCopyLiveInterval(Copy, MI, MOUse.isEarlyClobber(),
+ OrigUseReg, NewUseReg, NewUseSubReg);
+ else
+ for (MachineInstr &KMI :
+ make_range(Copy.getIterator(), std::next(MI.getIterator())))
+ KMI.clearRegisterKills(NewUseReg, TRI);
+
+ ++NumCopyForwards;
+ Changed = true;
+ }
+}
+
void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
++I;
if (MI->isCopy()) {
- unsigned Def = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
-
- assert(!TargetRegisterInfo::isVirtualRegister(Def) &&
- !TargetRegisterInfo::isVirtualRegister(Src) &&
- "MachineCopyPropagation should be run after register allocation!");
+ unsigned Def = getPhysReg(MI->getOperand(0));
+ unsigned Src = getPhysReg(MI->getOperand(1));
// The two copies cancel out and the source of the first copy
// hasn't been overridden, eliminate the second one. e.g.
// %ECX<def> = COPY %EAX
// =>
// %ECX<def> = COPY %EAX
- if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
- continue;
+ if (!PreRegRewrite)
+ if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
+ continue;
+
+ forwardUses(*MI);
+
+ // Src may have been changed by forwardUses()
+ Src = getPhysReg(MI->getOperand(1));
+ unsigned DefClobber = getFullPhysReg(MI->getOperand(0));
+ unsigned SrcClobber = getFullPhysReg(MI->getOperand(1));
// If Src is defined by a previous copy, the previous copy cannot be
// eliminated.
DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
// Copy is now a candidate for deletion.
- if (!MRI->isReserved(Def))
+ // Only look for dead COPYs if we're not running just before
+ // VirtRegRewriter, since presumably these COPYs will have already been
+ // removed.
+ if (!PreRegRewrite && !MRI->isReserved(Def))
MaybeDeadCopies.insert(MI);
// If 'Def' is previously source of another copy, then this earlier copy's
// %xmm2<def> = copy %xmm0
// ...
// %xmm2<def> = copy %xmm9
- ClobberRegister(Def);
+ ClobberRegister(DefClobber);
for (const MachineOperand &MO : MI->implicit_operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
- unsigned Reg = MO.getReg();
+ unsigned Reg = getFullPhysReg(MO);
if (!Reg)
continue;
ClobberRegister(Reg);
// Remember source that's copied to Def. Once it's clobbered, then
// it's no longer available for copy propagation.
- RegList &DestList = SrcMap[Src];
- if (!is_contained(DestList, Def))
- DestList.push_back(Def);
+ RegList &DestList = SrcMap[SrcClobber];
+ if (!is_contained(DestList, DefClobber))
+ DestList.push_back(DefClobber);
continue;
}
+ // Clobber any earlyclobber regs first.
+ for (const MachineOperand &MO : MI->operands())
+ if (MO.isReg() && MO.isEarlyClobber()) {
+ unsigned Reg = getFullPhysReg(MO);
+ // If we have a tied earlyclobber, that means it is also read by this
+ // instruction, so we need to make sure we don't remove it as dead
+ // later.
+ if (MO.isTied())
+ ReadRegister(Reg);
+ ClobberRegister(Reg);
+ }
+
+ forwardUses(*MI);
+
// Not a copy.
SmallVector<unsigned, 2> Defs;
const MachineOperand *RegMask = nullptr;
RegMask = &MO;
if (!MO.isReg())
continue;
- unsigned Reg = MO.getReg();
+ unsigned Reg = getFullPhysReg(MO);
if (!Reg)
continue;
- assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
- "MachineCopyPropagation should be run after register allocation!");
-
- if (MO.isDef()) {
+ if (MO.isDef() && !MO.isEarlyClobber()) {
Defs.push_back(Reg);
continue;
} else if (MO.readsReg())
ClobberRegister(Reg);
}
+ // Remove instructions that were made dead by shrinking live ranges. Do this
+ // after iterating over instructions to avoid instructions changing while
+ // iterating.
+ if (!ShrunkDeadInsts.empty()) {
+ SmallVector<unsigned, 8> NewRegs;
+ LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this)
+ .eliminateDeadDefs(ShrunkDeadInsts);
+ }
+
// If MBB doesn't have successors, delete the copies whose defs are not used.
// If MBB does have successors, then conservatively assume the defs are live-out
// since we don't want to trust live-in lists.
if (MBB.succ_empty()) {
for (MachineInstr *MaybeDead : MaybeDeadCopies) {
+ DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
+ MaybeDead->dump());
assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
MaybeDead->eraseFromParent();
Changed = true;
AvailCopyMap.clear();
CopyMap.clear();
SrcMap.clear();
+ ShrunkDeadInsts.clear();
}
bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getSubtarget().getRegisterInfo();
TII = MF.getSubtarget().getInstrInfo();
MRI = &MF.getRegInfo();
+ this->MF = &MF;
+ if (PreRegRewrite) {
+ Indexes = &getAnalysis<SlotIndexes>();
+ LIS = &getAnalysis<LiveIntervals>();
+ VRM = &getAnalysis<VirtRegMap>();
+ }
+ NoSubRegLiveness = !MRI->subRegLivenessEnabled();
for (MachineBasicBlock &MBB : MF)
CopyPropagateBlock(MBB);
cl::desc("Disable Codegen Prepare"));
static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
cl::desc("Disable Copy Propagation pass"));
+static cl::opt<bool> DisableCopyPropPreRegRewrite(
+    "disable-copyprop-prerewrite", cl::Hidden,
+    cl::desc("Disable Copy Propagation Pre-Register Rewrite pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
static cl::opt<bool> EnableImplicitNullChecks(
if (StandardID == &MachineCopyPropagationID)
return applyDisable(TargetID, DisableCopyProp);
+ if (StandardID == &MachineCopyPropagationPreRegRewriteID)
+ return applyDisable(TargetID, DisableCopyPropPreRegRewrite);
+
return TargetID;
}
// Allow targets to change the register assignments before rewriting.
addPreRewrite();
+ // Copy propagate to forward register uses and try to eliminate COPYs that
+ // were not coalesced.
+ addPass(&MachineCopyPropagationPreRegRewriteID);
+
// Finally rewrite virtual registers.
addPass(&VirtRegRewriterID);
; CHECK-LABEL: halfword:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1]
-; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1]
+; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
+; CHECK: strh [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #1]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
; CHECK-LABEL: word:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2]
-; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2]
+; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
+; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #2]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
; CHECK-LABEL: doubleword:
; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3]
-; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3]
+; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
+; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #3]
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
; CHECK: add.2d v[[REG:[0-9]+]], v0, v1
; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1
; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1
-; Without advanced copy optimization, we end up with cross register
-; banks copies that cannot be coalesced.
-; CHECK-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
-; With advanced copy optimization, we end up with just one copy
-; to insert the computed high part into the V register.
-; CHECK-OPT-NOT: fmov
+; CHECK-NOT: fmov
; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; CHECK-NOOPT: fmov d0, [[COPY_REG3]]
-; CHECK-OPT-NOT: fmov
+; CHECK-NOT: fmov
; CHECK: ins.d v0[1], [[COPY_REG2]]
; CHECK-NEXT: ret
;
; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d
; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1
; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1
-; GENERIC-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
-; GENERIC-OPT-NOT: fmov
+; GENERIC-NOT: fmov
; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
-; GENERIC-NOOPT: fmov d0, [[COPY_REG3]]
-; GENERIC-OPT-NOT: fmov
+; GENERIC-NOT: fmov
; GENERIC: ins v0.d[1], [[COPY_REG2]]
; GENERIC-NEXT: ret
%add = add <2 x i64> %a, %b
define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp {
entry:
; CHECK-LABEL: t:
-; CHECK: mov x0, [[REG1:x[0-9]+]]
-; CHECK: mov x1, [[REG2:x[0-9]+]]
+; CHECK: mov [[REG2:x[0-9]+]], x3
+; CHECK: mov [[REG1:x[0-9]+]], x2
+; CHECK: mov x0, x2
+; CHECK: mov x1, x3
; CHECK: bl _foo
; CHECK: mov x0, [[REG1]]
; CHECK: mov x1, [[REG2]]
; CHECK-COMMON-LABEL: test_phi:
; CHECK-COMMON: mov x[[PTR:[0-9]+]], x0
-; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x[[PTR]]]
+; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x0]
; CHECK-COMMON: [[LOOP:LBB[0-9_]+]]:
; CHECK-COMMON: mov.16b v[[R:[0-9]+]], v[[AB]]
; CHECK-COMMON: ldr h[[AB]], [x[[PTR]]]
%val = zext i1 %test to i32
; CHECK: cset {{[xw][0-9]+}}, ne
+; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]]
+; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]]
+
store i32 %val, i32* @var
call void @bar()
; Currently, the comparison is emitted again. An MSR/MRS pair would also be
; acceptable, but assuming the call preserves NZCV is not.
br i1 %test, label %iftrue, label %iffalse
-; CHECK: cmp [[LHS]], [[RHS]]
+; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]]
; CHECK: b.eq
iftrue:
define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) {
;CHECK-LABEL: test
entry:
-; A53: mov [[DATA:w[0-9]+]], w1
; A53: str q{{[0-9]+}}, {{.*}}
; A53: str q{{[0-9]+}}, {{.*}}
-; A53: str [[DATA]], {{.*}}
+; A53: str w1, {{.*}}
%0 = bitcast %struct1* %fde to i8*
tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false)
define void @test(i32 %px) {
; CHECK_LABEL: test:
; CHECK_LABEL: %entry
-; CHECK: subs
-; CHECK-NEXT: csel
+; CHECK: subs [[REG0:w[0-9]+]],
+; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]]
entry:
%sub = add nsw i32 %px, -1
%cmp = icmp slt i32 %px, 1
; GCN: s_mov_b32 s5, s32
; GCN: s_add_u32 s32, s32, 0x300
-; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
-; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
-; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
+; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-57-9][0-9]*]], s14
+; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-68-9][0-9]*]], s15
+; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-79][0-9]*]], s16
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]
-; GCN-DAG: s_mov_b32 s6, [[SAVE_X]]
-; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]]
-; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]]
+; GCN-DAG: s_mov_b32 s6, s14
+; GCN-DAG: s_mov_b32 s7, s15
+; GCN-DAG: s_mov_b32 s8, s16
; GCN: s_swappc_b64
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4
; GCN-LABEL: {{^}}v_mad_mix_v2f32:
; GFX9: v_mov_b32_e32 v3, v1
-; GFX9-NEXT: v_mad_mix_f32 v1, v0, v3, v2 op_sel:[1,1,1]
+; GFX9-NEXT: v_mad_mix_f32 v1, v0, v1, v2 op_sel:[1,1,1]
; GFX9-NEXT: v_mad_mix_f32 v0, v0, v3, v2
; CIVI: v_mac_f32
; GCN-LABEL: {{^}}v_mad_mix_v2f32_shuffle:
; GCN: s_waitcnt
; GFX9-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-NEXT: v_mad_mix_f32 v1, v0, v3, v2 op_sel:[0,1,1]
+; GFX9-NEXT: v_mad_mix_f32 v1, v0, v1, v2 op_sel:[0,1,1]
; GFX9-NEXT: v_mad_mix_f32 v0, v0, v3, v2 op_sel:[1,0,1]
; GFX9-NEXT: s_setpc_b64
; GCN-LABEL: {{^}}v_mad_mix_v2f32_f32imm1:
; GFX9: v_mov_b32_e32 v2, v1
; GFX9: v_mov_b32_e32 v3, 1.0
-; GFX9: v_mad_mix_f32 v1, v0, v2, v3 op_sel:[1,1,0] op_sel_hi:[1,1,0] ; encoding
+; GFX9: v_mad_mix_f32 v1, v0, v1, v3 op_sel:[1,1,0] op_sel_hi:[1,1,0] ; encoding
; GFX9: v_mad_mix_f32 v0, v0, v2, v3 op_sel_hi:[1,1,0] ; encoding
define <2 x float> @v_mad_mix_v2f32_f32imm1(<2 x half> %src0, <2 x half> %src1) #0 {
%src0.ext = fpext <2 x half> %src0 to <2 x float>
; GCN-LABEL: {{^}}v_mad_mix_v2f32_cvtf16imminv2pi:
; GFX9: v_mov_b32_e32 v2, v1
; GFX9: v_mov_b32_e32 v3, 0x3e230000
-; GFX9: v_mad_mix_f32 v1, v0, v2, v3 op_sel:[1,1,0] op_sel_hi:[1,1,0] ; encoding
+; GFX9: v_mad_mix_f32 v1, v0, v1, v3 op_sel:[1,1,0] op_sel_hi:[1,1,0] ; encoding
; GFX9: v_mad_mix_f32 v0, v0, v2, v3 op_sel_hi:[1,1,0] ; encoding
define <2 x float> @v_mad_mix_v2f32_cvtf16imminv2pi(<2 x half> %src0, <2 x half> %src1) #0 {
%src0.ext = fpext <2 x half> %src0 to <2 x float>
; GCN-LABEL: {{^}}v_mad_mix_v2f32_f32imminv2pi:
; GFX9: v_mov_b32_e32 v2, v1
; GFX9: v_mov_b32_e32 v3, 0.15915494
-; GFX9: v_mad_mix_f32 v1, v0, v2, v3 op_sel:[1,1,0] op_sel_hi:[1,1,0] ; encoding
+; GFX9: v_mad_mix_f32 v1, v0, v1, v3 op_sel:[1,1,0] op_sel_hi:[1,1,0] ; encoding
; GFX9: v_mad_mix_f32 v0, v0, v2, v3 op_sel_hi:[1,1,0] ; encoding
define <2 x float> @v_mad_mix_v2f32_f32imminv2pi(<2 x half> %src0, <2 x half> %src1) #0 {
%src0.ext = fpext <2 x half> %src0 to <2 x float>
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN-LABEL: {{^}}vgpr:
-; GCN: v_mov_b32_e32 v1, v0
-; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
-; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
+; GCN-DAG: v_mov_b32_e32 v1, v0
+; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
; GCN: s_waitcnt expcnt(0)
+; GCN: v_add_f32_e32 v0, 1.0, v0
; GCN-NOT: s_endpgm
define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
bb:
}
; GCN-LABEL: {{^}}both:
-; GCN: v_mov_b32_e32 v1, v0
-; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
-; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
-; GCN-DAG: s_add_i32 s0, s3, 2
+; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
+; GCN-DAG: v_mov_b32_e32 v1, v0
; GCN-DAG: s_mov_b32 s1, s2
-; GCN: s_mov_b32 s2, s3
; GCN: s_waitcnt expcnt(0)
+; GCN: v_add_f32_e32 v0, 1.0, v0
+; GCN-DAG: s_add_i32 s0, s3, 2
+; GCN-DAG: s_mov_b32 s2, s3
; GCN-NOT: s_endpgm
define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
bb:
%pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
%oldval = extractvalue { i32, i1 } %pair, 0
-; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
+; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0
+; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0]
; CHECK-ARMV7: cmp [[OLDVAL]], r1
; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]]
; CHECK-ARMV7: dmb ish
; CHECK-ARMV7: dmb ish
; CHECK-ARMV7: bx lr
-; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
+; CHECK-T2: mov r[[ADDR:[0-9]+]], r0
+; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0]
; CHECK-T2: cmp [[OLDVAL]], r1
; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]]
; CHECK-T2: dmb ish
; ARM: movvc r[[R1]], #0
; THUMBV6: mov r[[R2:[0-9]+]], r[[R0:[0-9]+]]
- ; THUMBV6: adds r[[R3:[0-9]+]], r[[R2]], r[[R1:[0-9]+]]
+ ; THUMBV6: adds r[[R3:[0-9]+]], r[[R0]], r[[R1:[0-9]+]]
; THUMBV6: movs r[[R0]], #0
; THUMBV6: movs r[[R1]], #1
; THUMBV6: cmp r[[R3]], r[[R2]]
; CHECK-APPLE: beq
; CHECK-APPLE: mov r0, #16
; CHECK-APPLE: malloc
-; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8]
+; CHECK-APPLE: strb r{{.*}}, [r0, #8]
; CHECK-APPLE: ble
; CHECK-APPLE: mov r8, [[ID]]
; MMR3: subu16 $5, $[[T19]], $[[T20]]
; MMR6: move $[[T0:[0-9]+]], $7
-; MMR6: sw $[[T0]], 8($sp)
+; MMR6: sw $7, 8($sp)
; MMR6: move $[[T1:[0-9]+]], $5
; MMR6: sw $4, 12($sp)
; MMR6: lw $[[T2:[0-9]+]], 48($sp)
ret double %r
; CHECK: @foo3
-; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]]
+; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]]
+; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]]
; CHECK: xsmaddmdp
; CHECK: xsmaddadp
}
ret i32 %e.0
; CHECK: @foo
; CHECK: mr [[NEWREG:[0-9]+]], 3
+; CHECK: mr [[REG1:[0-9]+]], 4
; CHECK: mtvsrd [[NEWREG2:[0-9]+]], 4
-; CHECK: mffprd [[REG1:[0-9]+]], [[NEWREG2]]
; CHECK: add {{[0-9]+}}, [[NEWREG]], [[REG1]]
; CHECK: mffprd [[REG2:[0-9]+]], [[NEWREG2]]
; CHECK: add {{[0-9]+}}, [[REG2]], [[NEWREG]]
; CHECK-DAG: mr [[REG:[0-9]+]], 3
; CHECK-DAG: li 0, 1076
-; CHECK: stw [[REG]],
+; CHECK-DAG: stw 3,
; CHECK: #APP
; CHECK: sc
define i64 @testOptimizeLiAddToAddi(i64 %a) {
; CHECK-LABEL: testOptimizeLiAddToAddi:
-; CHECK: addi 3, 30, 2444
+; CHECK: addi 3, 3, 2444
; CHECK: bl callv
; CHECK: addi 3, 30, 234
; CHECK: bl call
;CHECK-LABEL: straight_test:
; test1 may have been merged with entry
;CHECK: mr [[TAGREG:[0-9]+]], 3
-;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK: andi. {{[0-9]+}}, [[TAGREG:[0-9]+]], 1
;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
;CHECK-NEXT: # %test2
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
; CHECK-LABEL: test_load_add_i32
; CHECK: membar
-; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
-; CHECK: cas [%o0], [[V]], [[U]]
+; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
+; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
+; CHECK: cas [%o0], [[V]], [[V2]]
; CHECK: membar
define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
entry:
define i32 @b_to_bx(i32 %value) {
; CHECK-LABEL: b_to_bx:
; DISABLE: push {r7, lr}
-; CHECK: cmp r1, #49
+; CHECK: cmp r0, #49
; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]]
; ENABLE: push {r7, lr}
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: imull %edx, %edx
+; CHECK-NEXT: imull %ecx, %edx
; CHECK-NEXT: imull %eax, %ecx
; CHECK-NEXT: imull %eax, %eax
; CHECK-NEXT: addl %edx, %eax
; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]]
; CHECK-DAG: movl %[[r2]], 4(%esp)
-; CHECK-DAG: movl %[[r1]], (%esp)
+; CHECK-DAG: movl %edx, (%esp)
; CHECK: movl %esp, %[[reg:[^ ]*]]
; CHECK: pushl %[[reg]]
; CHECK: calll _addrof_i64
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: packuswb %xmm1, %xmm2
; SSE2-NEXT: packuswb %xmm10, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: psrld $1, %xmm12
; SSE2-NEXT: pand %xmm0, %xmm12
; SSE2-NEXT: movdqu %xmm7, (%rax)
; SSE2-NEXT: movdqu %xmm11, (%rax)
; SSE2-NEXT: movdqu %xmm13, (%rax)
-; SSE2-NEXT: movdqu %xmm1, (%rax)
+; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8:
; CHECK-NEXT: movq %rdx, %r14
; CHECK-NEXT: movq %rsi, %r15
; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: vmovaps (%rbx), %ymm0
+; CHECK-NEXT: vmovaps (%rdi), %ymm0
; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps (%r15), %ymm1
+; CHECK-NEXT: vmovaps (%rsi), %ymm1
; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT: vmovaps (%r14), %ymm2
+; CHECK-NEXT: vmovaps (%rdx), %ymm2
; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
; CHECK-NEXT: callq dummy
; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $112, %rsp
; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: vmovups (%rbx), %zmm0
+; CHECK-NEXT: vmovups (%rdi), %zmm0
; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill
; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1
-; CHECK-NEXT: vmovaps %zmm1, (%rbx)
+; CHECK-NEXT: vmovaps %zmm1, (%rdi)
; CHECK-NEXT: callq _Print__512
; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload
; CHECK-NEXT: callq _Print__512
; KNL_X32-NEXT: movl %edi, (%esp)
; KNL_X32-NEXT: calll _test11
; KNL_X32-NEXT: movl %eax, %ebx
-; KNL_X32-NEXT: movzbl %bl, %eax
+; KNL_X32-NEXT: movzbl %al, %eax
; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: movl %edi, (%esp)
; KNL-NEXT: kmovw %esi, %k0
; KNL-NEXT: kshiftlw $7, %k0, %k2
; KNL-NEXT: kshiftrw $15, %k2, %k2
-; KNL-NEXT: kmovw %k2, %eax
; KNL-NEXT: kshiftlw $6, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $1, %k0, %k0
; KNL-NEXT: kshiftrw $1, %k0, %k0
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kshiftlw $7, %k1, %k1
+; KNL-NEXT: kshiftlw $7, %k2, %k1
; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kshiftlw $7, %k1, %k2
; SKX-NEXT: kshiftrw $15, %k2, %k2
-; SKX-NEXT: kmovd %k2, %eax
; SKX-NEXT: kshiftlw $6, %k1, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kmovd %k1, %ecx
; SKX-NEXT: vpmovm2q %k0, %zmm0
-; SKX-NEXT: kmovd %ecx, %k0
-; SKX-NEXT: vpmovm2q %k0, %zmm1
+; SKX-NEXT: vpmovm2q %k1, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kshiftlb $1, %k0, %k0
; SKX-NEXT: kshiftrb $1, %k0, %k0
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: kshiftlb $7, %k1, %k1
+; SKX-NEXT: kshiftlb $7, %k2, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper
; AVX512BW-NEXT: kmovd %esi, %k0
; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
-; AVX512BW-NEXT: kmovd %k2, %eax
; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %ecx
; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
-; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
+; AVX512BW-NEXT: kshiftlw $7, %k2, %k1
; AVX512BW-NEXT: korw %k1, %k0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
-; AVX512DQ-NEXT: kmovw %k2, %eax
; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
-; AVX512DQ-NEXT: kmovw %k1, %ecx
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: kmovw %ecx, %k0
-; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
+; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
-; AVX512DQ-NEXT: kmovw %eax, %k1
-; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
+; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
; AVX512DQ-NEXT: korb %k1, %k0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kshiftlw $7, %k1, %k2
; CHECK-NEXT: kshiftrw $15, %k2, %k2
-; CHECK-NEXT: kmovd %k2, %eax
; CHECK-NEXT: kshiftlw $6, %k1, %k1
; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: kmovd %k1, %ecx
; CHECK-NEXT: vpmovm2q %k0, %zmm0
-; CHECK-NEXT: kmovd %ecx, %k0
-; CHECK-NEXT: vpmovm2q %k0, %zmm1
+; CHECK-NEXT: vpmovm2q %k1, %zmm1
; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7] sched: [5:0.50]
; CHECK-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; CHECK-NEXT: vpmovq2m %zmm2, %k0
; CHECK-NEXT: kshiftlb $1, %k0, %k0
; CHECK-NEXT: kshiftrb $1, %k0, %k0
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: kshiftlb $7, %k1, %k1
+; CHECK-NEXT: kshiftlb $7, %k2, %k1
; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: vzeroupper # sched: [4:1.00]
; AVX512F-32-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %esi, %eax
+; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: shrl $30, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; AVX512F-32-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm1
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %esi, %eax
+; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: shrl $31, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; AVX512F-32-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %esi, %eax
+; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: shrl $30, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; AVX512F-32-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm1
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: movl %esi, %eax
+; AVX512F-32-NEXT: movl %ecx, %eax
; AVX512F-32-NEXT: shrl $31, %eax
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; SSE2-LABEL: test_negative_zero_1:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: cvtss2sd %xmm2, %xmm4
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: movaps %xmm2, %xmm6
-; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
; SSE-NEXT: movaps {{.*#+}} xmm7
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm7, %xmm2
; SSE-NEXT: orps %xmm0, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; SSE-NEXT: andps %xmm7, %xmm0
; SSE-NEXT: cvtss2sd %xmm3, %xmm3
; SSE-NEXT: andps %xmm8, %xmm3
; SSE-NEXT: orps %xmm6, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE-NEXT: andps %xmm5, %xmm1
; SSE-NEXT: xorps %xmm6, %xmm6
; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
; SSE-LABEL: combine_vec_shl_ext_shl0:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: pslld $20, %xmm1
; SSE-NEXT: pslld $20, %xmm0
; SSE: # BB#0:
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: addss %xmm2, %xmm2
+; SSE-NEXT: addss %xmm0, %xmm2
; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm0
; SSE-NEXT: mulss %xmm1, %xmm1
; SSE-LABEL: complex_square_f64:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: addsd %xmm2, %xmm2
+; SSE-NEXT: addsd %xmm0, %xmm2
; SSE-NEXT: mulsd %xmm1, %xmm2
; SSE-NEXT: mulsd %xmm0, %xmm0
; SSE-NEXT: mulsd %xmm1, %xmm1
; SSE-LABEL: complex_mul_f64:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: mulsd %xmm0, %xmm4
; SSE-NEXT: mulsd %xmm1, %xmm0
; X64: # BB#0: # %entry
; X64-NEXT: movq %rdi, %rcx
; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rdx
; X64-NEXT: shrq $12, %rdx
; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039
; CHECK-LABEL: @test_fmaxf
; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1
; CHECK-LABEL: @test_fmax
; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1
; CHECK-LABEL: @test_intrinsic_fmaxf
; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: maxss %xmm0, %xmm1
; CHECK-LABEL: @test_intrinsic_fmax
; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: maxsd %xmm0, %xmm1
; X64-LABEL: fast_fmuladd_opts:
; X64: # BB#0:
; X64-NEXT: movaps %xmm0, %xmm1
-; X64-NEXT: addss %xmm1, %xmm1
+; X64-NEXT: addss %xmm0, %xmm1
; X64-NEXT: addss %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
; CHECK-LABEL: @test_fminf
; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1
; CHECK-LABEL: @test_fmin
; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1
; CHECK-LABEL: @test_intrinsic_fminf
; SSE: movaps %xmm0, %xmm2
-; SSE-NEXT: cmpunordss %xmm2, %xmm2
+; SSE-NEXT: cmpunordss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: minss %xmm0, %xmm1
; CHECK-LABEL: @test_intrinsic_fmin
; SSE: movapd %xmm0, %xmm2
-; SSE-NEXT: cmpunordsd %xmm2, %xmm2
+; SSE-NEXT: cmpunordsd %xmm0, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm1, %xmm3
; SSE-NEXT: minsd %xmm0, %xmm1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp)
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movq $0, (%rsp)
; SSE-LABEL: not_a_hsub_2:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: subss %xmm3, %xmm2
; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: subss %xmm3, %xmm0
; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3]
; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE-NEXT: subss %xmm4, %xmm1
; SSE-LABEL: not_a_hsub_3:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE-NEXT: subsd %xmm2, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: subsd %xmm0, %xmm2
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-LABEL: test5_undef:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: addss %xmm2, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $48, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
+; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: subq $16, %rsp
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
+; CHECK-LIBCALL-NEXT: movzwl 4(%rdi), %edi
; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi
; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
; CHECK-I686-NEXT: movaps %xmm0, %xmm1
-; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; CHECK-I686-NEXT: movss %xmm1, (%esp)
; CHECK-I686-NEXT: calll __gnu_f2h_ieee
; CHECK-I686-NEXT: movw %ax, %si
; CHECK-NEXT: fstpt (%esp)
; CHECK-NEXT: calll _ceil
; CHECK-NEXT: fld %st(0)
+; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: fistpl %st(0)
; CHECK-NEXT: ## InlineAsm End
call void @foo()
; CHECK-LABEL: bar:
; CHECK: callq foo
- ; CHECK-NEXT: movl %eax, %r15d
+ ; CHECK-NEXT: movl %edi, %r15d
call void asm sideeffect "movl $0, %r12d", "{r15}~{r12}"(i32 %X)
ret void
}
; X64-LABEL: print_framealloc_from_fp:
; X64: movq %rcx, %[[parent_fp:[a-z]+]]
-; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx
+; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx
; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]]
; X64: movq %[[str]], %rcx
; X64: callq printf
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl %esi, %edx
; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: testl %edx, %edx
+; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je LBB0_1
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB0_2: ## %while.body
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq %rsi, %rdx
; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: testq %rdx, %rdx
+; CHECK-NEXT: testq %rsi, %rsi
; CHECK-NEXT: je LBB2_1
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB2_2: ## %while.body
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %esi
; X32-NEXT: movl %esi, %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: pushl %esi
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
-; X32-NEXT: movl %edi, %ebx
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edi, %eax
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rcx, %r11
; X64-NEXT: adcq %rdi, %rbp
; X64-NEXT: setb %bl
; X64-NEXT: movzbl %bl, %ebx
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r11, %r12
-; X64-NEXT: movq %r11, %r8
+; X64-NEXT: movq %rcx, %r12
+; X64-NEXT: movq %rcx, %r8
; X64-NEXT: addq %rax, %r12
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rbp, %r12
; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: movq 16(%rsi), %rax
; X64-NEXT: movq %rsi, %r13
-; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rbx, %r11
; X64-NEXT: movq %r8, %rax
; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %r9, %rax
; X64-NEXT: adcq %rcx, %rax
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rdi, %rax
-; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rdx, %rax
; X64-NEXT: adcq %rcx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq 32(%r13), %rax
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: addq %r9, %rax
+; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, %r9
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %r15, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %rsi, %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %rcx, %r11
+; X64-NEXT: addq %rbx, %r11
; X64-NEXT: adcq %rsi, %rbp
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: setb %bl
; X64-NEXT: adcq %rbx, %r10
; X64-NEXT: movq %rcx, %rdx
; X64-NEXT: movq %rcx, %r12
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r9, %rdx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r11, %r8
-; X64-NEXT: adcq %r8, %r15
+; X64-NEXT: adcq %r11, %r15
; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rax, %r14
; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %r12
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r10, %rbp
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rsi, %rbx
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r12
; X64-NEXT: movq %r10, %rbx
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r10, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbp, %r8
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r13
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
; X64-NEXT: adcq %rdx, %r10
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r11, %rbp
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rdi, %rbx
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r14
; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: adcq $0, %r9
; X64-NEXT: adcq $0, %r10
; X64-NEXT: movq %rbp, %rsi
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: adcq $0, %r15
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbx, %r8
; X64-NEXT: movq %rcx, %r14
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r10, %rdi
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %r11, %rbx
; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq $0, %r14
; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r13, %rbx
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r13, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: addq %r8, %rcx
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r13, %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
; X64-NEXT: mulq %r13
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %r10, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rcx, %rdi
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r10
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rsi, %rdi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
-; X64-NEXT: addq %rbx, %r12
+; X64-NEXT: addq %rax, %r12
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
-; X64-NEXT: adcq %r14, %r15
+; X64-NEXT: adcq %rdx, %r15
; X64-NEXT: addq %rdi, %r12
; X64-NEXT: adcq %rcx, %r15
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: addq %r9, %rbp
+; X64-NEXT: addq %rax, %rbp
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rsi, %rbp
; X64-NEXT: movq 88(%rsi), %rax
; X64-NEXT: movq %rsi, %r9
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq %r8, %r10
; X64-NEXT: addq %rbx, %rsi
; X64-NEXT: adcq %rbp, %r10
-; X64-NEXT: movq %r9, %rdi
-; X64-NEXT: movq 64(%rdi), %r13
+; X64-NEXT: movq 64(%r9), %r13
; X64-NEXT: movq %r13, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq 72(%rdi), %r9
+; X64-NEXT: movq 72(%r9), %r9
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r15
; X64-NEXT: movq %r12, %rcx
-; X64-NEXT: addq %r15, %rcx
-; X64-NEXT: adcq %r11, %r8
+; X64-NEXT: addq %rax, %rcx
+; X64-NEXT: adcq %rdx, %r8
; X64-NEXT: addq %rbp, %rcx
; X64-NEXT: adcq %rbx, %r8
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
; X64-NEXT: setb %r10b
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: mulq %rdi
+; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdi, %r12
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %r8, %r12
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: imulq %rcx, %rdi
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r12, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %r12
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rdi, %rdx
; X64-NEXT: movq 104(%rbp), %r8
; X64-NEXT: movq 8(%rsi), %rbp
; X64-NEXT: movq %r15, %rax
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: mulq %rsi
+; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8
; X64-NEXT: movq %r11, %rax
; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rbp, %r14
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp
; X64-NEXT: adcq %rbx, %rsi
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: movq %r10, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r10, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r13
; X64-NEXT: movq %rax, %r10
; X64-NEXT: movq %r15, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: # kill: %RAX<kill>
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r15
-; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r10, %r15
; X64-NEXT: adcq %r13, %rdx
; X64-NEXT: addq %rbp, %r15
; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rax, %r11
-; X64-NEXT: addq %r11, %r10
-; X64-NEXT: adcq %r14, %r13
+; X64-NEXT: addq %rax, %r10
+; X64-NEXT: adcq %rdx, %r13
; X64-NEXT: addq %rbp, %r10
; X64-NEXT: adcq %rsi, %r13
; X64-NEXT: addq %r8, %r10
; X64-NEXT: movq 16(%rsi), %r8
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %rcx, %r9
-; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %r12
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %rbp, %r11
+; X64-NEXT: addq %rax, %r11
; X64-NEXT: adcq %rdx, %r14
; X64-NEXT: addq %r9, %r11
; X64-NEXT: adcq %rbx, %r14
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: imulq %rdi, %rcx
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: mulq %rdx
; X64-NEXT: addq %rcx, %rdx
; X64-NEXT: imulq %r8, %rsi
; X64-NEXT: addq %rsi, %rdx
; SSE-LABEL: _mul4xi32toi64b:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: pmuludq %xmm0, %xmm1
; SSE2-LABEL: mul_v16i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm2, %xmm1
; SSE2-LABEL: mul_v16i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm3
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-LABEL: mul_v32i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-LABEL: mul_v32i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm2, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-LABEL: mul_v64i8c:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
; SSE2-NEXT: psraw $8, %xmm6
; SSE2-NEXT: pmullw %xmm4, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117]
; SSE41-NEXT: pmullw %xmm6, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; SSE2-LABEL: mul_v64i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm4, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
; SSE2-NEXT: psraw $8, %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: pmullw %xmm8, %xmm9
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: packuswb %xmm9, %xmm0
; SSE2-NEXT: movdqa %xmm5, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
; SSE2-NEXT: psraw $8, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: pmullw %xmm9, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm6, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm7, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
; SSE2-NEXT: psraw $8, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
; SSE2-NEXT: psraw $8, %xmm5
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm8
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: pmullw %xmm9, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm9, %xmm0
; CHECK-LABEL: pow_wrapper:
; CHECK: # BB#0:
; CHECK-NEXT: movapd %xmm0, %xmm1
-; CHECK-NEXT: mulsd %xmm1, %xmm1
+; CHECK-NEXT: mulsd %xmm0, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0
; CHECK-NEXT: mulsd %xmm1, %xmm1
; CHECK-NEXT: mulsd %xmm1, %xmm0
; SSE-NEXT: cvtps2pd %xmm0, %xmm0
; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm1
-; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE-NEXT: fldl -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3],xmm9[3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0]
; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
-; CHECK-NEXT: vmovaps %xmm15, %xmm1
-; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
+; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
+; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9
; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
-; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
+; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8
; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0
; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %xmm9, (%rsp)
+; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq foo
; SSE41-LABEL: test14:
; SSE41: # BB#0: # %vector.ph
; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1]
; SSE2-NEXT: movdqa %xmm1, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm8
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: movdqa %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm4, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm0
; SSSE3-NEXT: movdqa %xmm1, %xmm9
; SSSE3-NEXT: movdqa %xmm0, %xmm8
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSSE3-NEXT: movdqa %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pxor %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm4, %xmm0
; SSSE3-NEXT: pxor %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm9, %xmm11
; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: movdqa %xmm1, %xmm10
; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSSE3-NEXT: movdqa %xmm9, %xmm11
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSSE3-NEXT: movdqa %xmm8, %xmm10
+; SSSE3-NEXT: movdqa %xmm1, %xmm10
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue
; CHECK: movl %ecx, %eax
-; CHECK: cmpl %edx, %eax
+; CHECK: cmpl %edx, %ecx
; CHECK: jge LBB1_2
; CHECK: pushl %eax
; CHECK: movl $4092, %eax
; SSE: # BB#0:
; SSE-NEXT: rsqrtss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: mulss %xmm2, %xmm2
+; SSE-NEXT: mulss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm0, %xmm2
; SSE-NEXT: addss {{.*}}(%rip), %xmm2
; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
; SSE: # BB#0:
; SSE-NEXT: rsqrtps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: mulps %xmm2, %xmm2
+; SSE-NEXT: mulps %xmm1, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: addps {{.*}}(%rip), %xmm2
; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
; SSE-NEXT: rsqrtps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
; SSE-NEXT: movaps %xmm3, %xmm2
-; SSE-NEXT: mulps %xmm2, %xmm2
+; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: mulps %xmm0, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
; SSE-NEXT: addps %xmm0, %xmm2
; SSE-NEXT: mulps %xmm3, %xmm2
; SSE-NEXT: rsqrtps %xmm1, %xmm5
; SSE-NEXT: movaps %xmm5, %xmm3
-; SSE-NEXT: mulps %xmm3, %xmm3
+; SSE-NEXT: mulps %xmm5, %xmm3
; SSE-NEXT: mulps %xmm1, %xmm3
; SSE-NEXT: addps %xmm0, %xmm3
; SSE-NEXT: mulps %xmm4, %xmm3
; X32-LABEL: test4:
; X32: # BB#0: # %entry
; X32-NEXT: movaps %xmm0, %xmm2
-; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; X32-NEXT: addss %xmm1, %xmm0
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X32-NEXT: subss %xmm1, %xmm2
; X64-LABEL: test4:
; X64: # BB#0: # %entry
; X64-NEXT: movaps %xmm0, %xmm2
-; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; X64-NEXT: addss %xmm1, %xmm0
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X64-NEXT: subss %xmm1, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm0, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE-NEXT: subss %xmm4, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: addss %xmm0, %xmm4
; CHECK-NEXT: Lcfi11:
; CHECK-NEXT: .cfi_offset %rbx, -16
; CHECK-NEXT: movl %edi, %ebx
-; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp)
; CHECK-NEXT: callq _baz
; CHECK-NEXT: Ltmp6:
; CHECK-NEXT: callq _bar
; CHECK: .byte 1
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 4
-; CHECK-NEXT: .short 6
+; CHECK-NEXT: .short 5
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
; CHECK: .byte 1
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 4
-; CHECK-NEXT: .short 3
+; CHECK-NEXT: .short 4
; CHECK-NEXT: .short 0
; CHECK-NEXT: .long 0
; CHECK: Ltmp2-_test2
gc "statepoint-example" {
; CHECK-LABEL: back_to_back_deopt
; The exact stores don't matter, but three stack slots need to be created.
-; CHECK-DAG: movl %ebx, 12(%rsp)
-; CHECK-DAG: movl %ebp, 8(%rsp)
-; CHECK-DAG: movl %r14d, 4(%rsp)
+; CHECK-DAG: movl %edi, 12(%rsp)
+; CHECK-DAG: movl %esi, 8(%rsp)
+; CHECK-DAG: movl %edx, 4(%rsp)
; CHECK: callq
; CHECK-DAG: movl %ebx, 12(%rsp)
; CHECK-DAG: movl %ebp, 8(%rsp)
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-LABEL: fptoui_4f32_to_4i32:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm2[1],xmm3[1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: movaps %xmm1, %xmm3
-; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
+; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
; SSE-NEXT: cmovaeq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: subss %xmm1, %xmm4
; SSE-NEXT: cvttss2si %xmm4, %rcx
; SSE-LABEL: uitofp_2i64_to_4f32:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
+; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB39_1
; SSE-NEXT: # BB#2:
; SSE-LABEL: uitofp_4i64_to_4f32_undef:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
+; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB41_1
; SSE-NEXT: # BB#2:
; SSE42: # BB#0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm3
-; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE42-NEXT: pcmpgtq %xmm0, %xmm3
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm3, %xmm0
; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; X32: # BB#0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm2
; X32-NEXT: psllw $5, %xmm1
-; X32-NEXT: movdqa %xmm2, %xmm3
+; X32-NEXT: movdqa %xmm0, %xmm3
; X32-NEXT: psllw $4, %xmm3
; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X32-NEXT: movdqa %xmm2, %xmm3
-; X32-NEXT: paddb %xmm3, %xmm3
+; X32-NEXT: paddb %xmm2, %xmm3
; X32-NEXT: paddb %xmm1, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64: # BB#0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $5, %xmm1
-; X64-NEXT: movdqa %xmm2, %xmm3
+; X64-NEXT: movdqa %xmm0, %xmm3
; X64-NEXT: psllw $4, %xmm3
; X64-NEXT: pand {{.*}}(%rip), %xmm3
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
-; X64-NEXT: paddb %xmm3, %xmm3
+; X64-NEXT: paddb %xmm2, %xmm3
; X64-NEXT: paddb %xmm1, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: psubd %xmm2, %xmm3
+; SSE41-NEXT: psubd %xmm0, %xmm3
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3
; SSE41-NEXT: movaps %xmm3, %xmm0
; SSE2-LABEL: test_div7_16i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-LABEL: test_rem7_16i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: paddb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: psraw $8, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X86: # BB#0:
; X86-NEXT: movdqa %xmm0, %xmm1
-; X86-NEXT: movdqa %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: psllw $4, %xmm2
; X86-NEXT: pand {{\.LCPI.*}}, %xmm2
; X86-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]
; X86-NEXT: paddb %xmm0, %xmm0
; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm2
-; X86-NEXT: paddb %xmm2, %xmm2
+; X86-NEXT: paddb %xmm1, %xmm2
; X86-NEXT: paddb %xmm0, %xmm0
; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm0
; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X64: # BB#0:
; X64-NEXT: movdqa %xmm0, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm2
+; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $4, %xmm2
; X64-NEXT: pand {{.*}}(%rip), %xmm2
; X64-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]
; X64-NEXT: paddb %xmm0, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm2
-; X64-NEXT: paddb %xmm2, %xmm2
+; X64-NEXT: paddb %xmm1, %xmm2
; X64-NEXT: paddb %xmm0, %xmm0
; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: paddw %xmm4, %xmm4
+; SSE41-NEXT: paddw %xmm1, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm6
; SSE41-NEXT: psllw $8, %xmm6
; SSE41-NEXT: movdqa %xmm3, %xmm5
; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: por %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddw %xmm1, %xmm1
+; SSE41-NEXT: paddw %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE41-NEXT: psubb %xmm3, %xmm2
; SSE41-NEXT: psllw $5, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm5
+; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: psllw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
-; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: paddb %xmm5, %xmm5
+; SSE41-NEXT: paddb %xmm4, %xmm5
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
; SSE41-NEXT: psllw $5, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: psrlw $4, %xmm5
; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
; SSE41-LABEL: constant_rotate_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSSE3-LABEL: sext_16i8_to_8i32:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7]
; SSSE3-LABEL: sext_16i8_to_16i32:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; SSSE3-LABEL: sext_16i8_to_4i64:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSE2-LABEL: sext_16i8_to_8i64:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
; SSSE3-NEXT: psrad $31, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psraw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrlw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psrlw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrlw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE41-NEXT: psllw $4, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm3, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psllw $8, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: pshufb %xmm0, %xmm1
; SSE41-NEXT: psllw $5, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm3, %xmm3
+; SSE41-NEXT: paddb %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psllw $4, %xmm4
; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm1, %xmm1
+; SSE41-NEXT: paddb %xmm2, %xmm1
; SSE41-NEXT: paddb %xmm3, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-LABEL: constant_shift_v16i8:
; SSE41: # BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw $4, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: paddb %xmm2, %xmm2
+; SSE41-NEXT: paddb %xmm1, %xmm2
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE-LABEL: PR22377:
; SSE: # BB#0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
; SSE-NEXT: addps %xmm0, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-LABEL: mul_add_const_v4i64_v4i32:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE41-NEXT: retq
;
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE41-NEXT: retq
;
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: movdqa %xmm3, %xmm12
; SSE2-NEXT: pcmpgtb %xmm7, %xmm12
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm9, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtb %xmm8, %xmm12
+; SSE2-NEXT: pcmpgtb %xmm3, %xmm12
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm9, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm3, %xmm8
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: movdqa %xmm3, %xmm12
; SSE2-NEXT: pcmpgtd %xmm7, %xmm12
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm13, %xmm3
-; SSE2-NEXT: movdqa %xmm9, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm12
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: movdqa %xmm12, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
; SSE2-LABEL: test122:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test124:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test126:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test128:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtb %xmm8, %xmm13
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm13, %xmm9
-; SSE2-NEXT: movdqa %xmm8, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
; SSE2-NEXT: movdqa %xmm14, %xmm2
; SSE2-NEXT: pxor %xmm13, %xmm2
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm13
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm13
; SSE2-NEXT: movdqa %xmm13, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm14
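; Reads of %xmm8 are forwarded to %xmm2 in these hunks; the copy from
; %xmm2 into %xmm8 is made above the visible context, and %xmm2 is only
; overwritten afterwards.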
; SSE2-LABEL: test154:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test156:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test158:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-LABEL: test160:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE4: # BB#0: # %entry
; SSE4-NEXT: movdqa %xmm0, %xmm2
; SSE4-NEXT: movdqa %xmm1, %xmm3
-; SSE4-NEXT: pcmpgtq %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtq %xmm0, %xmm3
; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
; SSE4-NEXT: pxor %xmm3, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm1, %xmm2
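; In the two SSE4 hunks above, %xmm2 is a fresh copy of %xmm0 and %xmm0
; is not clobbered before the compare, so PCMPGTQ reads %xmm0 directly.
; The copy survives because BLENDVPD still reads %xmm2.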
; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%eax)
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
; X86-SSE2-NEXT: movups %xmm0, (%eax)
; X86-SSE2-NEXT: movss %xmm2, 16(%eax)
; X86-SSE2-NEXT: movaps %xmm2, %xmm0
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm2[1],xmm0[1]
; X86-SSE2-NEXT: movss %xmm0, 24(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm2, 20(%eax)
; X86-SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE2-NEXT: movss %xmm0, (%eax)
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
-; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; X86-SSE2-NEXT: movss %xmm1, 8(%eax)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-SSE2-NEXT: movss %xmm0, 4(%eax)
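; In the MOVHLPS hunks above, the high half is taken from the copy
; source (%xmm0 or %xmm2) rather than from the destination register
; itself, which the updated shuffle comments reflect.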
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3]
; AVX1-NEXT: vmovdqa %xmm8, %xmm1
-; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm11, %ymm0
; AVX1-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
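; %xmm1 is a fresh copy of %xmm8, so the VPUNPCKLWD operands are
; forwarded to %xmm8 directly.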
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
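; ARG0CPY is a fresh copy of %edi, so the compare reads %edi directly;
; the related prologue tests below check the same rewrite.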
;
; Store %a in the alloca.
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Compare the arguments and jump to exit.
; After the prologue is set.
; CHECK: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; CHECK-NEXT: cmpl %esi, [[ARG0CPY]]
+; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Compare the arguments and jump to exit.
; No prologue needed.
; ENABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; ENABLE-NEXT: cmpl %esi, [[ARG0CPY]]
+; ENABLE-NEXT: cmpl %esi, %edi
; ENABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Prologue code.
; Compare the arguments and jump to exit.
; After the prologue is set.
; DISABLE: movl %edi, [[ARG0CPY:%e[a-z]+]]
-; DISABLE-NEXT: cmpl %esi, [[ARG0CPY]]
+; DISABLE-NEXT: cmpl %esi, %edi
; DISABLE-NEXT: jge [[EXIT_LABEL:LBB[0-9_]+]]
;
; Store %a in the alloca.
; CHECK: .debug_loc contents:
; CHECK-NEXT: 0x00000000:
-; CHECK-NEXT: 0x000000000000001f - 0x000000000000003c: DW_OP_reg3 RBX
+; CHECK-NEXT: 0x000000000000001f - 0x000000000000005a: DW_OP_reg3 RBX
; We should only have one entry
; CHECK-NOT: :
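; The single RBX location entry ends at 0x5a instead of 0x3c, presumably
; because the forwarded uses keep the variable's value readable in RBX
; over a longer range.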
; CHECK: callq g
; CHECK: movl %eax, %[[CSR:[^ ]*]]
; CHECK: #DEBUG_VALUE: f:y <- %ESI
-; CHECK: movl %[[CSR]], %ecx
+; CHECK: movl %eax, %ecx
; CHECK: callq g
; CHECK: movl %[[CSR]], %ecx
; CHECK: callq g
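; Only the first use of the callee-saved copy is forwarded to %eax: the
; intervening callq clobbers %eax, so the later use must keep reading
; the CSR.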