[AArch64][GlobalISel] Implement selection of G_MERGE of two s32s into s64.
author    Amara Emerson <aemerson@apple.com>
          Thu, 20 Dec 2018 01:11:04 +0000 (01:11 +0000)
committer Amara Emerson <aemerson@apple.com>
          Thu, 20 Dec 2018 01:11:04 +0000 (01:11 +0000)
This code pattern is an unfortunate side effect of the way some types get split
at call lowering. Ideally we'd either not generate it at all or combine it away
in the legalizer artifact combiner.

Until then, add selection support anyway, since this pattern accounts for a
significant proportion of our current fallbacks on CTMark.
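
As a rough sketch (based on the new test below; virtual register numbers are
illustrative), the gMIR pattern

  %0:gpr(s32) = COPY $w0
  %1:gpr(s32) = COPY $w1
  %2:gpr(s64) = G_MERGE_VALUES %0(s32), %1(s32)

is now selected to approximately

  %3:gpr64 = SUBREG_TO_REG 0, %0, %subreg.sub_32
  %4:gpr64 = SUBREG_TO_REG 0, %1, %subreg.sub_32
  %2:gpr64 = BFMXri %3, %4, 32, 31  ; insert the low 32 bits of %4 at bit 32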

rdar://46491420

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@349712 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/AArch64/AArch64InstructionSelector.cpp
test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir [new file with mode: 0644]

index 6cbfb6ab161b25e4ce8526fd7007fcc5e6eb78ed..cbb2e5c39d3d8e676738850f1240a684f0c20868 100644 (file)
@@ -73,6 +73,7 @@ private:
                           MachineBasicBlock::iterator MBBI,
                           MachineRegisterInfo &MRI) const;
   bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI) const;
 
   ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
 
@@ -1553,6 +1554,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
   }
   case TargetOpcode::G_BUILD_VECTOR:
     return selectBuildVector(I, MRI);
+  case TargetOpcode::G_MERGE_VALUES:
+    return selectMergeValues(I, MRI);
   }
 
   return false;
@@ -1590,6 +1593,53 @@ bool AArch64InstructionSelector::emitScalarToVector(
   }
 }
 
+bool AArch64InstructionSelector::selectMergeValues(
+    MachineInstr &I, MachineRegisterInfo &MRI) const {
+  assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
+  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
+  const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
+  assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");
+
+  // At the moment we only support merging two s32s into an s64.
+  if (I.getNumOperands() != 3)
+    return false;
+  if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
+    return false;
+  const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
+  if (RB.getID() != AArch64::GPRRegBankID)
+    return false;
+
+  auto *DstRC = &AArch64::GPR64RegClass;
+  unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
+  MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
+                                    TII.get(TargetOpcode::SUBREG_TO_REG))
+                                .addDef(SubToRegDef)
+                                .addImm(0)
+                                .addUse(I.getOperand(1).getReg())
+                                .addImm(AArch64::sub_32);
+  unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
+  // We need to any-extend the second scalar to 64 bits before we can use BFM.
+  MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
+                                    TII.get(TargetOpcode::SUBREG_TO_REG))
+                                .addDef(SubToRegDef2)
+                                .addImm(0)
+                                .addUse(I.getOperand(2).getReg())
+                                .addImm(AArch64::sub_32);
+  // Define the original destination so its uses see the merged value.
+  MachineInstr &BFM =
+      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
+           .addDef(I.getOperand(0).getReg())
+           .addUse(SubToRegDef)
+           .addUse(SubToRegDef2)
+           .addImm(32)
+           .addImm(31);
+  constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
+  constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
+  constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
+  I.eraseFromParent();
+  return true;
+}
+
 bool AArch64InstructionSelector::selectBuildVector(
     MachineInstr &I, MachineRegisterInfo &MRI) const {
   assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
diff --git a/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir b/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir
new file mode 100644 (file)
index 0000000..292e5fe
--- /dev/null
@@ -0,0 +1,34 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+--- |
+  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+  define void @gmerge_s64_s32() { ret void }
+...
+
+---
+name:            gmerge_s64_s32
+legalized:       true
+regBankSelected: true
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: gmerge_s64_s32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32all(s32) = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all(s32) = COPY $w1
+    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]](s32), %subreg.sub_32
+    ; CHECK: [[SUBREG_TO_REG1:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY1]](s32), %subreg.sub_32
+    ; CHECK: [[BFMXri:%[0-9]+]]:gpr64 = BFMXri [[SUBREG_TO_REG]], [[SUBREG_TO_REG1]], 32, 31
+    ; CHECK: $x0 = COPY [[BFMXri]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
+    %2(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+    $x0 = COPY %2(s64)
+...