From: Tim Northover Date: Thu, 16 Jun 2016 01:42:25 +0000 (+0000) Subject: AArch64: allow MOV (imm) alias to be printed X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=bde073f537afb691a4b3aa6ab41060ecff56280f;p=llvm AArch64: allow MOV (imm) alias to be printed The backend has been around for years, it's pretty ridiculous that we can't even use the preferred form for printing "MOV" aliases. Unfortunately, TableGen can't handle the complex predicates when printing so it's a bunch of nasty C++. Oh well. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@272865 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp index e43fc2cc784..432c7217c13 100644 --- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -868,14 +868,7 @@ public: if (!CE) return false; uint64_t Value = CE->getValue(); - if (RegWidth == 32) - Value &= 0xffffffffULL; - - // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0". - if (Value == 0 && Shift != 0) - return false; - - return (Value & ~(0xffffULL << Shift)) == 0; + return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth); } template @@ -886,16 +879,7 @@ public: if (!CE) return false; uint64_t Value = CE->getValue(); - // MOVZ takes precedence over MOVN. - for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16) - if ((Value & ~(0xffffULL << MOVZShift)) == 0) - return false; - - Value = ~Value; - if (RegWidth == 32) - Value &= 0xffffffffULL; - - return (Value & ~(0xffffULL << Shift)) == 0; + return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth); } bool isFPImm() const { return Kind == k_FPImm; } diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp index 06883712e7a..a3669cf18e7 100644 --- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp +++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp @@ -219,6 +219,54 @@ void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O, return; } + // MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their + // domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 > + // MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest instruction + // that can represent the move is the MOV alias, and the rest get printed + // normally. + if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi) && + MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) { + int RegWidth = Opcode == AArch64::MOVZXi ? 64 : 32; + int Shift = MI->getOperand(2).getImm(); + uint64_t Value = (uint64_t)MI->getOperand(1).getImm() << Shift; + + if (AArch64_AM::isMOVZMovAlias(Value, Shift, + Opcode == AArch64::MOVZXi ? 64 : 32)) { + O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #" + << formatImm(SignExtend64(Value, RegWidth)); + return; + } + } + + if ((Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) && + MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) { + int RegWidth = Opcode == AArch64::MOVNXi ? 
64 : 32; + int Shift = MI->getOperand(2).getImm(); + uint64_t Value = ~((uint64_t)MI->getOperand(1).getImm() << Shift); + if (RegWidth == 32) + Value = Value & 0xffffffff; + + if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) { + O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #" + << formatImm(SignExtend64(Value, RegWidth)); + return; + } + } + + if ((Opcode == AArch64::ORRXri || Opcode == AArch64::ORRWri) && + (MI->getOperand(1).getReg() == AArch64::XZR || + MI->getOperand(1).getReg() == AArch64::WZR) && + MI->getOperand(2).isImm()) { + int RegWidth = Opcode == AArch64::ORRXri ? 64 : 32; + uint64_t Value = AArch64_AM::decodeLogicalImmediate( + MI->getOperand(2).getImm(), RegWidth); + if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) { + O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #" + << formatImm(SignExtend64(Value, RegWidth)); + return; + } + } + if (!printAliasInstr(MI, STI, O)) printInstruction(MI, STI, O); diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h index 648b1dfc8c5..3e5ef4df470 100644 --- a/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h +++ b/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h @@ -753,6 +753,49 @@ static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) { return (EncVal << 32) | EncVal; } +inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) { + for (int Shift = 0; Shift <= RegWidth - 16; Shift += 16) + if ((Value & ~(0xffffULL << Shift)) == 0) + return true; + + return false; +} + +inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) { + if (RegWidth == 32) + Value &= 0xffffffffULL; + + // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0". + if (Value == 0 && Shift != 0) + return false; + + return (Value & ~(0xffffULL << Shift)) == 0; +} + +inline static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) { + // MOVZ takes precedence over MOVN. + if (isAnyMOVZMovAlias(Value, RegWidth)) + return false; + + Value = ~Value; + if (RegWidth == 32) + Value &= 0xffffffffULL; + + return isMOVZMovAlias(Value, Shift, RegWidth); +} + +inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) { + if (isAnyMOVZMovAlias(Value, RegWidth)) + return true; + + // It's not a MOVZ, but it might be a MOVN. + Value = ~Value; + if (RegWidth == 32) + Value &= 0xffffffffULL; + + return isAnyMOVZMovAlias(Value, RegWidth); +} + } // end namespace AArch64_AM } // end namespace llvm diff --git a/test/CodeGen/AArch64/aarch64-smull.ll b/test/CodeGen/AArch64/aarch64-smull.ll index ec0e2de92d0..1c8d13a00b2 100644 --- a/test/CodeGen/AArch64/aarch64-smull.ll +++ b/test/CodeGen/AArch64/aarch64-smull.ll @@ -234,7 +234,7 @@ define <8 x i16> @smull_extvec_v8i8_v8i16(<8 x i8> %arg) nounwind { define <8 x i16> @smull_noextvec_v8i8_v8i16(<8 x i8> %arg) nounwind { ; Do not use SMULL if the BUILD_VECTOR element values are too big. ; CHECK-LABEL: smull_noextvec_v8i8_v8i16: -; CHECK: movz +; CHECK: mov ; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h %tmp3 = sext <8 x i8> %arg to <8 x i16> %tmp4 = mul <8 x i16> %tmp3, @@ -268,7 +268,7 @@ define <8 x i16> @umull_extvec_v8i8_v8i16(<8 x i8> %arg) nounwind { define <8 x i16> @umull_noextvec_v8i8_v8i16(<8 x i8> %arg) nounwind { ; Do not use SMULL if the BUILD_VECTOR element values are too big. 
; CHECK-LABEL: umull_noextvec_v8i8_v8i16: -; CHECK: movz +; CHECK: mov ; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h %tmp3 = zext <8 x i8> %arg to <8 x i16> %tmp4 = mul <8 x i16> %tmp3, diff --git a/test/CodeGen/AArch64/arm64-abi_align.ll b/test/CodeGen/AArch64/arm64-abi_align.ll index 96ea658209a..e76adb4abc0 100644 --- a/test/CodeGen/AArch64/arm64-abi_align.ll +++ b/test/CodeGen/AArch64/arm64-abi_align.ll @@ -74,7 +74,7 @@ define i32 @caller38_stack() #1 { entry: ; CHECK-LABEL: caller38_stack ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] -; CHECK: movz w[[C:[0-9]+]], #9 +; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 @@ -128,7 +128,7 @@ entry: ; CHECK-LABEL: caller39_stack ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #32] ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16] -; CHECK: movz w[[C:[0-9]+]], #9 +; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 @@ -184,7 +184,7 @@ entry: ; CHECK-LABEL: caller40_stack ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #24] ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] -; CHECK: movz w[[C:[0-9]+]], #9 +; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 @@ -238,7 +238,7 @@ entry: ; CHECK-LABEL: caller41_stack ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #32] ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16] -; CHECK: movz w[[C:[0-9]+]], #9 +; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 @@ -330,7 +330,7 @@ entry: ; CHECK: sub x[[A:[0-9]+]], x29, #32 ; Address of s1 is passed on stack at sp+8 ; CHECK: str x[[A]], [sp, #8] -; CHECK: movz w[[C:[0-9]+]], #9 +; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] ; FAST-LABEL: caller42_stack @@ -442,7 +442,7 @@ entry: ; CHECK: sub x[[A:[0-9]+]], x29, #32 ; Address of s1 is passed on stack at sp+8 ; CHECK: str x[[A]], [sp, #8] -; CHECK: movz w[[C:[0-9]+]], #9 +; CHECK: mov w[[C:[0-9]+]], #9 ; CHECK: str w[[C]], [sp] ; FAST-LABEL: caller43_stack diff --git a/test/CodeGen/AArch64/arm64-addrmode.ll b/test/CodeGen/AArch64/arm64-addrmode.ll index fb952f47c0b..0e651a910d7 100644 --- a/test/CodeGen/AArch64/arm64-addrmode.ll +++ b/test/CodeGen/AArch64/arm64-addrmode.ll @@ -82,7 +82,7 @@ define void @t7(i64 %a) { define void @t8(i64 %a) { ; CHECK-LABEL: t8: -; CHECK: movn [[REG:x[0-9]+]], #4661 +; CHECK: mov [[REG:x[0-9]+]], #-4662 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = sub i64 %a, 4662 ;-4662 is 0xffffffffffffedca %2 = inttoptr i64 %1 to i64* @@ -92,7 +92,7 @@ define void @t8(i64 %a) { define void @t9(i64 %a) { ; CHECK-LABEL: t9: -; CHECK: movn [[REG:x[0-9]+]], #4661, lsl #16 +; CHECK: mov [[REG:x[0-9]+]], #-305463297 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = add i64 -305463297, %a ;-305463297 is 0xffffffffedcaffff %2 = inttoptr i64 %1 to i64* @@ -102,7 +102,7 @@ define void @t9(i64 %a) { define void @t10(i64 %a) { ; CHECK-LABEL: t10: -; CHECK: movz [[REG:x[0-9]+]], #291, lsl #48 +; CHECK: mov [[REG:x[0-9]+]], #81909218222800896 ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = add i64 %a, 81909218222800896 
;0x123000000000000 %2 = inttoptr i64 %1 to i64* @@ -112,7 +112,7 @@ define void @t10(i64 %a) { define void @t11(i64 %a) { ; CHECK-LABEL: t11: -; CHECK: movz w[[NUM:[0-9]+]], #291, lsl #16 +; CHECK: mov w[[NUM:[0-9]+]], #19070976 ; CHECK: movk w[[NUM:[0-9]+]], #17767 ; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]] %1 = add i64 %a, 19088743 ;0x1234567 diff --git a/test/CodeGen/AArch64/arm64-atomic.ll b/test/CodeGen/AArch64/arm64-atomic.ll index 9362205dce0..fef137b1023 100644 --- a/test/CodeGen/AArch64/arm64-atomic.ll +++ b/test/CodeGen/AArch64/arm64-atomic.ll @@ -103,7 +103,7 @@ define i64 @fetch_and_nand_64(i64* %p) #0 { define i32 @fetch_and_or(i32* %p) #0 { ; CHECK-LABEL: fetch_and_or: -; CHECK: movz [[OLDVAL_REG:w[0-9]+]], #5 +; CHECK: mov [[OLDVAL_REG:w[0-9]+]], #5 ; CHECK: [[TRYBB:.?LBB[0-9_]+]]: ; CHECK: ldaxr w[[DEST_REG:[0-9]+]], [x0] ; CHECK: orr [[SCRATCH2_REG:w[0-9]+]], w[[DEST_REG]], [[OLDVAL_REG]] diff --git a/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/test/CodeGen/AArch64/arm64-bitfield-extract.ll index 7f8a7f73498..402e16ccdb2 100644 --- a/test/CodeGen/AArch64/arm64-bitfield-extract.ll +++ b/test/CodeGen/AArch64/arm64-bitfield-extract.ll @@ -348,7 +348,7 @@ entry: ; CHECK-LABEL: fct16: ; CHECK: ldr [[REG1:w[0-9]+]], ; Create the constant -; CHECK: movz [[REGCST:w[0-9]+]], #26, lsl #16 +; CHECK: mov [[REGCST:w[0-9]+]], #1703936 ; CHECK: movk [[REGCST]], #33120 ; Do the masking ; CHECK: and [[REG2:w[0-9]+]], [[REG1]], [[REGCST]] @@ -377,7 +377,7 @@ entry: ; CHECK-LABEL: fct17: ; CHECK: ldr [[REG1:x[0-9]+]], ; Create the constant -; CHECK: movz w[[REGCST:[0-9]+]], #26, lsl #16 +; CHECK: mov w[[REGCST:[0-9]+]], #1703936 ; CHECK: movk w[[REGCST]], #33120 ; Do the masking ; CHECK: and [[REG2:x[0-9]+]], [[REG1]], x[[REGCST]] diff --git a/test/CodeGen/AArch64/arm64-build-vector.ll b/test/CodeGen/AArch64/arm64-build-vector.ll index b6fa7a44850..1a6c3687dcb 100644 --- a/test/CodeGen/AArch64/arm64-build-vector.ll +++ b/test/CodeGen/AArch64/arm64-build-vector.ll @@ -36,7 +36,7 @@ define <4 x float> @foo(float %a, float %b, float %c, float %d) nounwind { define <8 x i16> @build_all_zero(<8 x i16> %a) #1 { ; CHECK-LABEL: build_all_zero: -; CHECK: movz w[[GREG:[0-9]+]], #44672 +; CHECK: mov w[[GREG:[0-9]+]], #44672 ; CHECK-NEXT: fmov s[[FREG:[0-9]+]], w[[GREG]] ; CHECK-NEXT: mul.8h v0, v0, v[[FREG]] %b = add <8 x i16> %a, diff --git a/test/CodeGen/AArch64/arm64-const-addr.ll b/test/CodeGen/AArch64/arm64-const-addr.ll index a6ab7478ca1..e55db290448 100644 --- a/test/CodeGen/AArch64/arm64-const-addr.ll +++ b/test/CodeGen/AArch64/arm64-const-addr.ll @@ -5,7 +5,7 @@ ; Test if the constant base address gets only materialized once. 
define i32 @test1() nounwind { ; CHECK-LABEL: test1 -; CHECK: movz w8, #1039, lsl #16 +; CHECK: mov w8, #68091904 ; CHECK-NEXT: movk w8, #49152 ; CHECK-NEXT: ldp w9, w10, [x8, #4] ; CHECK: ldr w8, [x8, #12] diff --git a/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll b/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll index 7ce914a4659..9dae7a6f5b6 100644 --- a/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll +++ b/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll @@ -6,10 +6,10 @@ ; Load an address with an offset larget then LDR imm can handle define i32 @foo() nounwind { entry: -; CHECK: @foo +; CHECK-LABEL: @foo ; CHECK: adrp x[[REG:[0-9]+]], _sortlist@GOTPAGE ; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _sortlist@GOTPAGEOFF] -; CHECK: movz x[[REG2:[0-9]+]], #20000 +; CHECK: mov x[[REG2:[0-9]+]], #20000 ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]] ; CHECK: ldr w0, [x[[REG3]]] ; CHECK: ret @@ -19,10 +19,10 @@ entry: define i64 @foo2() nounwind { entry: -; CHECK: @foo2 +; CHECK-LABEL: @foo2 ; CHECK: adrp x[[REG:[0-9]+]], _sortlist2@GOTPAGE ; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _sortlist2@GOTPAGEOFF] -; CHECK: movz x[[REG2:[0-9]+]], #40000 +; CHECK: mov x[[REG2:[0-9]+]], #40000 ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]] ; CHECK: ldr x0, [x[[REG3]]] ; CHECK: ret @@ -36,8 +36,8 @@ entry: define signext i8 @foo3() nounwind ssp { entry: -; CHECK: @foo3 -; CHECK: movz x[[REG:[0-9]+]], #2874, lsl #32 +; CHECK-LABEL: @foo3 +; CHECK: mov x[[REG:[0-9]+]], #12343736008704 ; CHECK: movk x[[REG]], #29646, lsl #16 ; CHECK: movk x[[REG]], #12274 %0 = load i8*, i8** @pd2, align 8 diff --git a/test/CodeGen/AArch64/arm64-fast-isel-gv.ll b/test/CodeGen/AArch64/arm64-fast-isel-gv.ll index 30f8516460a..85d000b8606 100644 --- a/test/CodeGen/AArch64/arm64-fast-isel-gv.ll +++ b/test/CodeGen/AArch64/arm64-fast-isel-gv.ll @@ -18,8 +18,8 @@ entry: ; CHECK: @Rand ; CHECK: adrp [[REG1:x[0-9]+]], _seed@GOTPAGE ; CHECK: ldr [[REG2:x[0-9]+]], {{\[}}[[REG1]], _seed@GOTPAGEOFF{{\]}} -; CHECK: movz [[REG3:x[0-9]+]], #13849 -; CHECK: movz [[REG4:x[0-9]+]], #1309 +; CHECK: mov [[REG3:x[0-9]+]], #13849 +; CHECK: mov [[REG4:x[0-9]+]], #1309 ; CHECK: ldr [[REG5:x[0-9]+]], {{\[}}[[REG2]]{{\]}} ; CHECK: mul [[REG6:x[0-9]+]], [[REG5]], [[REG4]] ; CHECK: add [[REG7:x[0-9]+]], [[REG6]], [[REG3]] diff --git a/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll b/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll index 1c777f5e48d..a8f30ad4777 100644 --- a/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll +++ b/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll @@ -8,7 +8,7 @@ define void @t1() { ; ARM64: adrp x8, _message@PAGE ; ARM64: add x0, x8, _message@PAGEOFF ; ARM64: mov w9, wzr -; ARM64: movz x2, #80 +; ARM64: mov x2, #80 ; ARM64: uxtb w1, w9 ; ARM64: bl _memset call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i8 0, i64 80, i32 16, i1 false) @@ -23,7 +23,7 @@ define void @t2() { ; ARM64: ldr x0, [x8, _temp@GOTPAGEOFF] ; ARM64: adrp x8, _message@PAGE ; ARM64: add x1, x8, _message@PAGEOFF -; ARM64: movz x2, #80 +; ARM64: mov x2, #80 ; ARM64: bl _memcpy call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 80, i32 16, i1 false) ret void @@ -37,7 +37,7 @@ define void @t3() { ; ARM64: ldr x0, [x8, _temp@GOTPAGEOFF] ; ARM64: adrp x8, _message@PAGE ; ARM64: add x1, x8, _message@PAGEOFF -; ARM64: movz x2, #20 +; ARM64: mov 
x2, #20 ; ARM64: bl _memmove call void @llvm.memmove.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 20, i32 16, i1 false) ret void @@ -137,7 +137,7 @@ define void @t8() { define void @test_distant_memcpy(i8* %dst) { ; ARM64-LABEL: test_distant_memcpy: ; ARM64: mov [[ARRAY:x[0-9]+]], sp -; ARM64: movz [[OFFSET:x[0-9]+]], #8000 +; ARM64: mov [[OFFSET:x[0-9]+]], #8000 ; ARM64: add x[[ADDR:[0-9]+]], [[ARRAY]], [[OFFSET]] ; ARM64: ldrb [[BYTE:w[0-9]+]], [x[[ADDR]]] ; ARM64: strb [[BYTE]], [x0] diff --git a/test/CodeGen/AArch64/arm64-fp128.ll b/test/CodeGen/AArch64/arm64-fp128.ll index 6839bdde723..bcb196e4045 100644 --- a/test/CodeGen/AArch64/arm64-fp128.ll +++ b/test/CodeGen/AArch64/arm64-fp128.ll @@ -174,11 +174,11 @@ define i32 @test_br_cc() { iftrue: ret i32 42 ; CHECK-NEXT: BB# -; CHECK-NEXT: movz w0, #42 +; CHECK-NEXT: mov w0, #42 ; CHECK: ret iffalse: ret i32 29 -; CHECK: movz w0, #29 +; CHECK: mov w0, #29 ; CHECK: ret } diff --git a/test/CodeGen/AArch64/arm64-memcpy-inline.ll b/test/CodeGen/AArch64/arm64-memcpy-inline.ll index 7abe4c659c9..23e90100fb9 100644 --- a/test/CodeGen/AArch64/arm64-memcpy-inline.ll +++ b/test/CodeGen/AArch64/arm64-memcpy-inline.ll @@ -40,7 +40,7 @@ entry: define void @t2(i8* nocapture %C) nounwind { entry: ; CHECK-LABEL: t2: -; CHECK: movz [[REG3:w[0-9]+]] +; CHECK: mov [[REG3:w[0-9]+]] ; CHECK: movk [[REG3]], ; CHECK: str [[REG3]], [x0, #32] ; CHECK: ldp [[DEST1:q[0-9]+]], [[DEST2:q[0-9]+]], [x{{[0-9]+}}] @@ -75,9 +75,9 @@ define void @t5(i8* nocapture %C) nounwind { entry: ; CHECK-LABEL: t5: ; CHECK: strb wzr, [x0, #6] -; CHECK: movz [[REG7:w[0-9]+]], #21587 +; CHECK: mov [[REG7:w[0-9]+]], #21587 ; CHECK: strh [[REG7]], [x0, #4] -; CHECK: movz [[REG8:w[0-9]+]], +; CHECK: mov [[REG8:w[0-9]+]], ; CHECK: movk [[REG8]], ; CHECK: str [[REG8]], [x0] tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i32 1, i1 false) diff --git a/test/CodeGen/AArch64/arm64-movi.ll b/test/CodeGen/AArch64/arm64-movi.ll index e43b31015ac..344e2224ab4 100644 --- a/test/CodeGen/AArch64/arm64-movi.ll +++ b/test/CodeGen/AArch64/arm64-movi.ll @@ -7,21 +7,21 @@ ; 64-bit immed with 32-bit pattern size, rotated by 0. define i64 @test64_32_rot0() nounwind { ; CHECK-LABEL: test64_32_rot0: -; CHECK: orr x0, xzr, #0x700000007 +; CHECK: mov x0, #30064771079 ret i64 30064771079 } ; 64-bit immed with 32-bit pattern size, rotated by 2. define i64 @test64_32_rot2() nounwind { ; CHECK-LABEL: test64_32_rot2: -; CHECK: orr x0, xzr, #0xc0000003c0000003 +; CHECK: mov x0, #-4611686002321260541 ret i64 13835058071388291075 } ; 64-bit immed with 4-bit pattern size, rotated by 3. define i64 @test64_4_rot3() nounwind { ; CHECK-LABEL: test64_4_rot3: -; CHECK: orr x0, xzr, #0xeeeeeeeeeeeeeeee +; CHECK: mov x0, #-1229782938247303442 ret i64 17216961135462248174 } @@ -35,7 +35,7 @@ define i32 @test32_32_rot16() nounwind { ; 32-bit immed with 2-bit pattern size, rotated by 1. 
define i32 @test32_2_rot1() nounwind { ; CHECK-LABEL: test32_2_rot1: -; CHECK: orr w0, wzr, #0xaaaaaaaa +; CHECK: mov w0, #-1431655766 ret i32 2863311530 } @@ -45,13 +45,13 @@ define i32 @test32_2_rot1() nounwind { define i32 @movz() nounwind { ; CHECK-LABEL: movz: -; CHECK: movz w0, #5 +; CHECK: mov w0, #5 ret i32 5 } define i64 @movz_3movk() nounwind { ; CHECK-LABEL: movz_3movk: -; CHECK: movz x0, #5, lsl #48 +; CHECK: mov x0, #1407374883553280 ; CHECK-NEXT: movk x0, #4660, lsl #32 ; CHECK-NEXT: movk x0, #43981, lsl #16 ; CHECK-NEXT: movk x0, #22136 @@ -60,14 +60,14 @@ define i64 @movz_3movk() nounwind { define i64 @movz_movk_skip1() nounwind { ; CHECK-LABEL: movz_movk_skip1: -; CHECK: movz x0, #5, lsl #32 +; CHECK: mov x0, #21474836480 ; CHECK-NEXT: movk x0, #17185, lsl #16 ret i64 22601072640 } define i64 @movz_skip1_movk() nounwind { ; CHECK-LABEL: movz_skip1_movk: -; CHECK: movz x0, #34388, lsl #32 +; CHECK: mov x0, #147695335374848 ; CHECK-NEXT: movk x0, #4660 ret i64 147695335379508 } @@ -78,13 +78,13 @@ define i64 @movz_skip1_movk() nounwind { define i64 @movn() nounwind { ; CHECK-LABEL: movn: -; CHECK: movn x0, #41 +; CHECK: mov x0, #-42 ret i64 -42 } define i64 @movn_skip1_movk() nounwind { ; CHECK-LABEL: movn_skip1_movk: -; CHECK: movn x0, #41, lsl #32 +; CHECK: mov x0, #-176093659137 ; CHECK-NEXT: movk x0, #4660 ret i64 -176093720012 } @@ -96,28 +96,28 @@ define i64 @movn_skip1_movk() nounwind { define i64 @orr_movk1() nounwind { ; CHECK-LABEL: orr_movk1: -; CHECK: orr x0, xzr, #0xffff0000ffff0 +; CHECK: mov x0, #72056494543077120 ; CHECK: movk x0, #57005, lsl #16 ret i64 72056498262245120 } define i64 @orr_movk2() nounwind { ; CHECK-LABEL: orr_movk2: -; CHECK: orr x0, xzr, #0xffff0000ffff0 +; CHECK: mov x0, #72056494543077120 ; CHECK: movk x0, #57005, lsl #48 ret i64 -2400982650836746496 } define i64 @orr_movk3() nounwind { ; CHECK-LABEL: orr_movk3: -; CHECK: orr x0, xzr, #0xffff0000ffff0 +; CHECK: mov x0, #72056494543077120 ; CHECK: movk x0, #57005, lsl #32 ret i64 72020953688702720 } define i64 @orr_movk4() nounwind { ; CHECK-LABEL: orr_movk4: -; CHECK: orr x0, xzr, #0xffff0000ffff0 +; CHECK: mov x0, #72056494543077120 ; CHECK: movk x0, #57005 ret i64 72056494543068845 } @@ -125,14 +125,14 @@ define i64 @orr_movk4() nounwind { ; rdar://14987618 define i64 @orr_movk5() nounwind { ; CHECK-LABEL: orr_movk5: -; CHECK: orr x0, xzr, #0xff00ff00ff00ff00 +; CHECK: mov x0, #-71777214294589696 ; CHECK: movk x0, #57005, lsl #16 ret i64 -71777214836900096 } define i64 @orr_movk6() nounwind { ; CHECK-LABEL: orr_movk6: -; CHECK: orr x0, xzr, #0xff00ff00ff00ff00 +; CHECK: mov x0, #-71777214294589696 ; CHECK: movk x0, #57005, lsl #16 ; CHECK: movk x0, #57005, lsl #48 ret i64 -2400982647117578496 @@ -140,14 +140,14 @@ define i64 @orr_movk6() nounwind { define i64 @orr_movk7() nounwind { ; CHECK-LABEL: orr_movk7: -; CHECK: orr x0, xzr, #0xff00ff00ff00ff00 +; CHECK: mov x0, #-71777214294589696 ; CHECK: movk x0, #57005, lsl #48 ret i64 -2400982646575268096 } define i64 @orr_movk8() nounwind { ; CHECK-LABEL: orr_movk8: -; CHECK: orr x0, xzr, #0xff00ff00ff00ff00 +; CHECK: mov x0, #-71777214294589696 ; CHECK: movk x0, #57005 ; CHECK: movk x0, #57005, lsl #48 ret i64 -2400982646575276371 @@ -156,7 +156,7 @@ define i64 @orr_movk8() nounwind { ; rdar://14987715 define i64 @orr_movk9() nounwind { ; CHECK-LABEL: orr_movk9: -; CHECK: orr x0, xzr, #0xffffff000000000 +; CHECK: mov x0, #1152921435887370240 ; CHECK: movk x0, #65280 ; CHECK: movk x0, #57005, lsl #16 ret i64 1152921439623315200 @@ -164,14 
+164,14 @@ define i64 @orr_movk9() nounwind { define i64 @orr_movk10() nounwind { ; CHECK-LABEL: orr_movk10: -; CHECK: orr x0, xzr, #0xfffffffffffff00 +; CHECK: mov x0, #1152921504606846720 ; CHECK: movk x0, #57005, lsl #16 ret i64 1152921504047824640 } define i64 @orr_movk11() nounwind { ; CHECK-LABEL: orr_movk11: -; CHECK: orr x0, xzr, #0xfff00000000000ff +; CHECK: mov x0, #-4503599627370241 ; CHECK: movk x0, #57005, lsl #16 ; CHECK: movk x0, #65535, lsl #32 ret i64 -4222125209747201 @@ -179,14 +179,14 @@ define i64 @orr_movk11() nounwind { define i64 @orr_movk12() nounwind { ; CHECK-LABEL: orr_movk12: -; CHECK: orr x0, xzr, #0xfff00000000000ff +; CHECK: mov x0, #-4503599627370241 ; CHECK: movk x0, #57005, lsl #32 ret i64 -4258765016661761 } define i64 @orr_movk13() nounwind { ; CHECK-LABEL: orr_movk13: -; CHECK: orr x0, xzr, #0xfffff000000 +; CHECK: mov x0, #17592169267200 ; CHECK: movk x0, #57005 ; CHECK: movk x0, #57005, lsl #48 ret i64 -2401245434149282131 @@ -195,7 +195,7 @@ define i64 @orr_movk13() nounwind { ; rdar://13944082 define i64 @g() nounwind { ; CHECK-LABEL: g: -; CHECK: movz x0, #65535, lsl #48 +; CHECK: mov x0, #-281474976710656 ; CHECK: movk x0, #2 entry: ret i64 -281474976710654 diff --git a/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll b/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll index fe290629d92..caf4498276c 100644 --- a/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll +++ b/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll @@ -4,7 +4,7 @@ ; RUN: llvm-objdump -triple arm64-apple-darwin -d %t | FileCheck %s --check-prefix CHECK-ENCODING ; CHECK-ENCODING-NOT: -; CHECK-ENCODING: movz x16, #65535, lsl #32 +; CHECK-ENCODING: mov x16, #281470681743360 ; CHECK-ENCODING: movk x16, #57005, lsl #16 ; CHECK-ENCODING: movk x16, #48879 @@ -17,7 +17,7 @@ entry: ; CHECK: str x{{.+}}, [sp] ; CHECK-NEXT: mov x0, x{{.+}} ; CHECK: Ltmp -; CHECK-NEXT: movz x16, #65535, lsl #32 +; CHECK-NEXT: mov x16, #281470681743360 ; CHECK: movk x16, #57005, lsl #16 ; CHECK: movk x16, #48879 ; CHECK-NEXT: blr x16 @@ -25,7 +25,7 @@ entry: ; FAST: Ltmp ; FAST: str x{{.+}}, [sp] ; FAST: Ltmp -; FAST-NEXT: movz x16, #65535, lsl #32 +; FAST-NEXT: mov x16, #281470681743360 ; FAST-NEXT: movk x16, #57005, lsl #16 ; FAST-NEXT: movk x16, #48879 ; FAST-NEXT: blr x16 @@ -48,7 +48,7 @@ entry: ; CHECK-NEXT: orr w[[REG:[0-9]+]], wzr, #0x2 ; CHECK-NEXT: str x[[REG]], [sp] ; CHECK: Ltmp -; CHECK-NEXT: movz x16, #65535, lsl #32 +; CHECK-NEXT: mov x16, #281470681743360 ; CHECK-NEXT: movk x16, #57005, lsl #16 ; CHECK-NEXT: movk x16, #48879 ; CHECK-NEXT: blr x16 @@ -61,7 +61,7 @@ entry: ; FAST-NEXT: str [[REG2]], [sp, #16] ; FAST-NEXT: str [[REG3]], [sp, #24] ; FAST: Ltmp -; FAST-NEXT: movz x16, #65535, lsl #32 +; FAST-NEXT: mov x16, #281470681743360 ; FAST-NEXT: movk x16, #57005, lsl #16 ; FAST-NEXT: movk x16, #48879 ; FAST-NEXT: blr x16 @@ -75,7 +75,7 @@ define i64 @jscall_patchpoint_codegen3(i64 %callee) { entry: ; CHECK-LABEL: jscall_patchpoint_codegen3: ; CHECK: Ltmp -; CHECK: movz w[[REG:[0-9]+]], #10 +; CHECK: mov w[[REG:[0-9]+]], #10 ; CHECK-NEXT: str x[[REG]], [sp, #48] ; CHECK-NEXT: orr w[[REG:[0-9]+]], wzr, #0x8 ; CHECK-NEXT: str w[[REG]], [sp, #36] @@ -86,7 +86,7 @@ entry: ; CHECK-NEXT: orr w[[REG:[0-9]+]], wzr, #0x2 ; CHECK-NEXT: str x[[REG]], [sp] ; CHECK: Ltmp -; CHECK-NEXT: movz x16, #65535, lsl #32 +; CHECK-NEXT: mov x16, #281470681743360 ; CHECK-NEXT: movk x16, #57005, lsl #16 ; CHECK-NEXT: movk x16, #48879 ; CHECK-NEXT: blr x16 @@ -96,14 +96,14 @@ entry: ; FAST-NEXT: orr 
[[REG2:w[0-9]+]], wzr, #0x4 ; FAST-NEXT: orr [[REG3:x[0-9]+]], xzr, #0x6 ; FAST-NEXT: orr [[REG4:w[0-9]+]], wzr, #0x8 -; FAST-NEXT: movz [[REG5:x[0-9]+]], #10 +; FAST-NEXT: mov [[REG5:x[0-9]+]], #10 ; FAST-NEXT: str [[REG1]], [sp] ; FAST-NEXT: str [[REG2]], [sp, #16] ; FAST-NEXT: str [[REG3]], [sp, #24] ; FAST-NEXT: str [[REG4]], [sp, #36] ; FAST-NEXT: str [[REG5]], [sp, #48] ; FAST: Ltmp -; FAST-NEXT: movz x16, #65535, lsl #32 +; FAST-NEXT: mov x16, #281470681743360 ; FAST-NEXT: movk x16, #57005, lsl #16 ; FAST-NEXT: movk x16, #48879 ; FAST-NEXT: blr x16 @@ -122,4 +122,3 @@ define webkit_jscc zeroext i16 @test_i16(i16 zeroext %a, i16 zeroext %b) { declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...) declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...) - diff --git a/test/CodeGen/AArch64/arm64-patchpoint.ll b/test/CodeGen/AArch64/arm64-patchpoint.ll index db125351fb4..2f9004bb22e 100644 --- a/test/CodeGen/AArch64/arm64-patchpoint.ll +++ b/test/CodeGen/AArch64/arm64-patchpoint.ll @@ -6,11 +6,11 @@ define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) { entry: ; CHECK-LABEL: trivial_patchpoint_codegen: -; CHECK: movz x16, #57005, lsl #32 +; CHECK: mov x16, #244834610708480 ; CHECK-NEXT: movk x16, #48879, lsl #16 ; CHECK-NEXT: movk x16, #51966 ; CHECK-NEXT: blr x16 -; CHECK: movz x16, #57005, lsl #32 +; CHECK: mov x16, #244834610708480 ; CHECK-NEXT: movk x16, #48879, lsl #16 ; CHECK-NEXT: movk x16, #51967 ; CHECK-NEXT: blr x16 diff --git a/test/CodeGen/AArch64/arm64-register-pairing.ll b/test/CodeGen/AArch64/arm64-register-pairing.ll index ab7a8549cf5..eac7e5cb336 100644 --- a/test/CodeGen/AArch64/arm64-register-pairing.ll +++ b/test/CodeGen/AArch64/arm64-register-pairing.ll @@ -14,7 +14,7 @@ define void @odd() nounwind { ; CHECK: stp x24, x23, [sp, #96] ; CHECK: stp x22, x21, [sp, #112] ; CHECK: stp x20, x19, [sp, #128] -; CHECK: movz x0, #42 +; CHECK: mov x0, #42 ; CHECK: ldp x20, x19, [sp, #128] ; CHECK: ldp x22, x21, [sp, #112] ; CHECK: ldp x24, x23, [sp, #96] @@ -31,7 +31,7 @@ define void @odd() nounwind { ; CHECK-NOTMACHO: str x27, [sp, #32] ; CHECK-NOTMACHO: stp x25, x23, [sp, #48] ; CHECK-NOTMACHO: stp x21, x19, [sp, #64] -; CHECK-NOTMACHO: movz x0, #42 +; CHECK-NOTMACHO: mov x0, #42 ; CHECK-NOTMACHO: ldp x21, x19, [sp, #64] ; CHECK-NOTMACHO: ldp x25, x23, [sp, #48] ; CHECK-NOTMACHO: ldr x27, [sp, #32] @@ -52,7 +52,7 @@ define void @even() nounwind { ; CHECK: stp x24, x23, [sp, #96] ; CHECK: stp x22, x21, [sp, #112] ; CHECK: stp x20, x19, [sp, #128] -; CHECK: movz x0, #42 +; CHECK: mov x0, #42 ; CHECK: ldp x20, x19, [sp, #128] ; CHECK: ldp x22, x21, [sp, #112] ; CHECK: ldp x24, x23, [sp, #96] @@ -69,7 +69,7 @@ define void @even() nounwind { ; CHECK-NOTMACHO: str x28, [sp, #32] ; CHECK-NOTMACHO: stp x26, x24, [sp, #48] ; CHECK-NOTMACHO: stp x22, x20, [sp, #64] -; CHECK-NOTMACHO: movz x0, #42 +; CHECK-NOTMACHO: mov x0, #42 ; CHECK-NOTMACHO: ldp x22, x20, [sp, #64] ; CHECK-NOTMACHO: ldp x26, x24, [sp, #48] ; CHECK-NOTMACHO: ldr x28, [sp, #32] diff --git a/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/test/CodeGen/AArch64/arm64-shrink-wrapping.ll index 088b440e17f..16ae7ef8e1b 100644 --- a/test/CodeGen/AArch64/arm64-shrink-wrapping.ll +++ b/test/CodeGen/AArch64/arm64-shrink-wrapping.ll @@ -73,7 +73,7 @@ declare i32 @doSomething(i32, i32*) ; DISABLE: cbz w0, [[ELSE_LABEL:LBB[0-9_]+]] ; ; CHECK: mov [[SUM:w[0-9]+]], wzr -; CHECK-NEXT: movz [[IV:w[0-9]+]], #10 +; CHECK-NEXT: mov [[IV:w[0-9]+]], #10 ; ; Next BB. 
; CHECK: [[LOOP:LBB[0-9_]+]]: ; %for.body @@ -140,7 +140,7 @@ declare i32 @something(...) ; CHECK-NEXT: stp [[CSR3:x[0-9]+]], [[CSR4:x[0-9]+]], [sp, #16] ; CHECK-NEXT: add [[NEW_SP:x[0-9]+]], sp, #16 ; CHECK: mov [[SUM:w[0-9]+]], wzr -; CHECK-NEXT: movz [[IV:w[0-9]+]], #10 +; CHECK-NEXT: mov [[IV:w[0-9]+]], #10 ; Next BB. ; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body ; CHECK: bl _something @@ -184,7 +184,7 @@ for.end: ; preds = %for.body ; DISABLE: cbz w0, [[ELSE_LABEL:LBB[0-9_]+]] ; ; CHECK: mov [[SUM:w[0-9]+]], wzr -; CHECK-NEXT: movz [[IV:w[0-9]+]], #10 +; CHECK-NEXT: mov [[IV:w[0-9]+]], #10 ; ; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body ; CHECK: bl _something @@ -255,7 +255,7 @@ declare void @somethingElse(...) ; ; CHECK: bl _somethingElse ; CHECK-NEXT: mov [[SUM:w[0-9]+]], wzr -; CHECK-NEXT: movz [[IV:w[0-9]+]], #10 +; CHECK-NEXT: mov [[IV:w[0-9]+]], #10 ; ; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body ; CHECK: bl _something @@ -409,7 +409,7 @@ declare void @llvm.va_end(i8*) ; ; DISABLE: cbz w0, [[ELSE_LABEL:LBB[0-9_]+]] ; -; CHECK: movz [[IV:w[0-9]+]], #10 +; CHECK: mov [[IV:w[0-9]+]], #10 ; ; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body ; Inline asm statement. @@ -511,7 +511,7 @@ declare i32 @someVariadicFunc(i32, ...) ; CHECK: and [[TEST:w[0-9]+]], w0, #0xff ; CHECK-NEXT: cbnz [[TEST]], [[ABORT:LBB[0-9_]+]] ; -; CHECK: movz w0, #42 +; CHECK: mov w0, #42 ; ; DISABLE-NEXT: ldp ; diff --git a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll index 91862c0ca84..16ddf690fe9 100644 --- a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll +++ b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll @@ -32,7 +32,7 @@ define void @test_simple(i32 %n, ...) { ; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #128 ; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16] -; CHECK: movn [[GR_OFFS:w[0-9]+]], #55 +; CHECK: mov [[GR_OFFS:w[0-9]+]], #-56 ; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24] ; CHECK: orr [[VR_OFFS:w[0-9]+]], wzr, #0xffffff80 @@ -70,10 +70,10 @@ define void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) 
{ ; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #112 ; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16] -; CHECK: movn [[GR_OFFS:w[0-9]+]], #39 +; CHECK: mov [[GR_OFFS:w[0-9]+]], #-40 ; CHECK: str [[GR_OFFS]], [x[[VA_LIST]], #24] -; CHECK: movn [[VR_OFFS:w[0-9]+]], #111 +; CHECK: mov [[VR_OFFS:w[0-9]+]], #-11 ; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28] %addr = bitcast %va_list* @var to i8* diff --git a/test/CodeGen/AArch64/arm64-vecCmpBr.ll b/test/CodeGen/AArch64/arm64-vecCmpBr.ll index c7321e4b7d0..0c496fedfc2 100644 --- a/test/CodeGen/AArch64/arm64-vecCmpBr.ll +++ b/test/CodeGen/AArch64/arm64-vecCmpBr.ll @@ -59,7 +59,7 @@ define i32 @anyNonZero64(<4 x i16> %a) #0 { ; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]] ; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]] ; CHECK: [[LABEL]]: -; CHECK-NEXT: movz w0, #0 +; CHECK-NEXT: mov w0, #0 entry: %0 = bitcast <4 x i16> %a to <8 x i8> @@ -83,7 +83,7 @@ define i32 @anyNonZero128(<8 x i16> %a) #0 { ; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]] ; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]] ; CHECK: [[LABEL]]: -; CHECK-NEXT: movz w0, #0 +; CHECK-NEXT: mov w0, #0 entry: %0 = bitcast <8 x i16> %a to <16 x i8> %vmaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %0) #3 @@ -152,7 +152,7 @@ define i32 @allNonZero64(<4 x i16> %a) #0 { ; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]] ; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]] ; CHECK: [[LABEL]]: -; CHECK-NEXT: movz w0, #0 +; CHECK-NEXT: mov w0, #0 entry: %0 = bitcast <4 x i16> %a to <8 x i8> %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %0) #3 @@ -175,7 +175,7 @@ define i32 @allNonZero128(<8 x i16> %a) #0 { ; CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]] ; CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[A-Z_0-9]+]] ; CHECK: [[LABEL]]: -; CHECK-NEXT: movz w0, #0 +; CHECK-NEXT: mov w0, #0 entry: %0 = bitcast <8 x i16> %a to <16 x i8> %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %0) #3 diff --git a/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll b/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll index 349bb6fd78a..d66c9348df5 100644 --- a/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll +++ b/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll @@ -18,8 +18,8 @@ define void @t2() nounwind ssp { entry: ; CHECK-LABEL: t2: ; CHECK-NOT: mov w0, wzr -; CHECK: movz w0, #0 -; CHECK: movz w1, #0 +; CHECK: mov w0, #0 +; CHECK: mov w1, #0 tail call void @bari(i32 0, i32 0) nounwind ret void } @@ -28,8 +28,8 @@ define void @t3() nounwind ssp { entry: ; CHECK-LABEL: t3: ; CHECK-NOT: mov x0, xzr -; CHECK: movz x0, #0 -; CHECK: movz x1, #0 +; CHECK: mov x0, #0 +; CHECK: mov x1, #0 tail call void @barl(i64 0, i64 0) nounwind ret void } diff --git a/test/CodeGen/AArch64/bitfield-insert.ll b/test/CodeGen/AArch64/bitfield-insert.ll index ffbd2d31572..dae459606a7 100644 --- a/test/CodeGen/AArch64/bitfield-insert.ll +++ b/test/CodeGen/AArch64/bitfield-insert.ll @@ -380,7 +380,7 @@ entry: } ; CHECK-LABEL: @test1 -; CHECK: movz [[REG:w[0-9]+]], #5 +; CHECK: mov [[REG:w[0-9]+]], #5 ; CHECK: bfxil w0, [[REG]], #0, #4 define i32 @test1(i32 %a) { %1 = and i32 %a, -16 ; 0xfffffff0 @@ -389,7 +389,7 @@ define i32 @test1(i32 %a) { } ; CHECK-LABEL: @test2 -; CHECK: movz [[REG:w[0-9]+]], #10 +; CHECK: mov [[REG:w[0-9]+]], #10 ; CHECK: bfi w0, [[REG]], #22, #4 define i32 @test2(i32 %a) { %1 = and i32 %a, -62914561 ; 0xfc3fffff @@ -398,7 +398,7 @@ define i32 @test2(i32 %a) { } ; CHECK-LABEL: @test3 -; CHECK: movz [[REG:x[0-9]+]], #5 +; CHECK: mov [[REG:x[0-9]+]], 
#5 ; CHECK: bfxil x0, [[REG]], #0, #3 define i64 @test3(i64 %a) { %1 = and i64 %a, -8 ; 0xfffffffffffffff8 @@ -407,7 +407,7 @@ define i64 @test3(i64 %a) { } ; CHECK-LABEL: @test4 -; CHECK: movz [[REG:x[0-9]+]], #9 +; CHECK: mov [[REG:x[0-9]+]], #9 ; CHECK: bfi x0, [[REG]], #1, #7 define i64 @test4(i64 %a) { %1 = and i64 %a, -255 ; 0xffffffffffffff01 @@ -428,7 +428,7 @@ define i32 @test5(i32 %a) { ; BFXIL will use the same constant as the ORR, so we don't care how the constant ; is materialized (it's an equal cost either way). ; CHECK-LABEL: @test6 -; CHECK: movz [[REG:w[0-9]+]], #11, lsl #16 +; CHECK: mov [[REG:w[0-9]+]], #720896 ; CHECK: movk [[REG]], #23250 ; CHECK: bfxil w0, [[REG]], #0, #20 define i32 @test6(i32 %a) { @@ -440,7 +440,7 @@ define i32 @test6(i32 %a) { ; BFIs that require the same number of instruction to materialize the constant ; as the original ORR are okay. ; CHECK-LABEL: @test7 -; CHECK: movz [[REG:w[0-9]+]], #5, lsl #16 +; CHECK: mov [[REG:w[0-9]+]], #327680 ; CHECK: movk [[REG]], #44393 ; CHECK: bfi w0, [[REG]], #1, #19 define i32 @test7(i32 %a) { @@ -454,7 +454,7 @@ define i32 @test7(i32 %a) { ; 'and' with a 'movk', which would decrease ILP while using the same number of ; instructions. ; CHECK: @test8 -; CHECK: movz [[REG2:x[0-9]+]], #36694, lsl #32 +; CHECK: mov [[REG2:x[0-9]+]], #157599529959424 ; CHECK: and [[REG1:x[0-9]+]], x0, #0xff000000000000ff ; CHECK: movk [[REG2]], #31059, lsl #16 ; CHECK: orr x0, [[REG1]], [[REG2]] diff --git a/test/CodeGen/AArch64/cond-sel.ll b/test/CodeGen/AArch64/cond-sel.ll index 7726222fe2a..b39cea1f619 100644 --- a/test/CodeGen/AArch64/cond-sel.ll +++ b/test/CodeGen/AArch64/cond-sel.ll @@ -10,8 +10,8 @@ define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize { %tst1 = icmp ugt i32 %lhs32, %rhs32 %val1 = select i1 %tst1, i32 42, i32 52 store i32 %val1, i32* @var32 -; CHECK-DAG: movz [[W52:w[0-9]+]], #{{52|0x34}} -; CHECK-DAG: movz [[W42:w[0-9]+]], #{{42|0x2a}} +; CHECK-DAG: mov [[W52:w[0-9]+]], #{{52|0x34}} +; CHECK-DAG: mov [[W42:w[0-9]+]], #{{42|0x2a}} ; CHECK: csel {{w[0-9]+}}, [[W42]], [[W52]], hi %rhs64 = sext i32 %rhs32 to i64 @@ -34,8 +34,8 @@ define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %r ; CHECK-NOFP-NOT: fcmp %val1 = select i1 %tst1, i32 42, i32 52 store i32 %val1, i32* @var32 -; CHECK: movz [[W52:w[0-9]+]], #{{52|0x34}} -; CHECK: movz [[W42:w[0-9]+]], #{{42|0x2a}} +; CHECK: mov [[W52:w[0-9]+]], #{{52|0x34}} +; CHECK: mov [[W42:w[0-9]+]], #{{42|0x2a}} ; CHECK: csel [[MAYBETRUE:w[0-9]+]], [[W42]], [[W52]], mi ; CHECK: csel {{w[0-9]+}}, [[W42]], [[MAYBETRUE]], gt @@ -46,7 +46,7 @@ define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %r %val2 = select i1 %tst2, i64 9, i64 15 store i64 %val2, i64* @var64 ; CHECK: orr w[[CONST15:[0-9]+]], wzr, #0xf -; CHECK: movz {{[wx]}}[[CONST9:[0-9]+]], #{{9|0x9}} +; CHECK: mov {{[wx]}}[[CONST9:[0-9]+]], #{{9|0x9}} ; CHECK: csel [[MAYBETRUE:x[0-9]+]], x[[CONST9]], x[[CONST15]], eq ; CHECK: csel {{x[0-9]+}}, x[[CONST9]], [[MAYBETRUE]], vs diff --git a/test/CodeGen/AArch64/dag-combine-invaraints.ll b/test/CodeGen/AArch64/dag-combine-invaraints.ll index ac2d057ff3c..20ba3fea837 100644 --- a/test/CodeGen/AArch64/dag-combine-invaraints.ll +++ b/test/CodeGen/AArch64/dag-combine-invaraints.ll @@ -24,7 +24,7 @@ main_: ret i32 0 ; CHECK: main: -; CHECK-DAG: movz +; CHECK-DAG: mov ; CHECK-DAG: orr ; CHECK: csel } diff --git a/test/CodeGen/AArch64/fast-isel-gep.ll b/test/CodeGen/AArch64/fast-isel-gep.ll index 
624c104cdcc..0cb1fd8465d 100644 --- a/test/CodeGen/AArch64/fast-isel-gep.ll +++ b/test/CodeGen/AArch64/fast-isel-gep.ll @@ -33,7 +33,7 @@ define i32* @test_array3(i32* %a) { define i32* @test_array4(i32* %a) { ; CHECK-LABEL: test_array4 -; CHECK: movz [[REG:x[0-9]+]], #4104 +; CHECK: mov [[REG:x[0-9]+]], #4104 ; CHECK-NEXR: add x0, x0, [[REG]] %1 = getelementptr inbounds i32, i32* %a, i64 1026 ret i32* %1 diff --git a/test/CodeGen/AArch64/fp16-v4-instructions.ll b/test/CodeGen/AArch64/fp16-v4-instructions.ll index d517435a356..b39ff08db39 100644 --- a/test/CodeGen/AArch64/fp16-v4-instructions.ll +++ b/test/CodeGen/AArch64/fp16-v4-instructions.ll @@ -15,7 +15,7 @@ entry: define <4 x half> @build_h4(<4 x half> %a) { entry: ; CHECK-LABEL: build_h4: -; CHECK: movz [[GPR:w[0-9]+]], #15565 +; CHECK: mov [[GPR:w[0-9]+]], #15565 ; CHECK: dup v0.4h, [[GPR]] ret <4 x half> } diff --git a/test/CodeGen/AArch64/fpimm.ll b/test/CodeGen/AArch64/fpimm.ll index 3e333237a57..b4faef750a2 100644 --- a/test/CodeGen/AArch64/fpimm.ll +++ b/test/CodeGen/AArch64/fpimm.ll @@ -38,7 +38,7 @@ define void @check_double() { } ; LARGE-LABEL: check_float2 -; LARGE: movz [[REG:w[0-9]+]], #16457, lsl #16 +; LARGE: mov [[REG:w[0-9]+]], #1078525952 ; LARGE-NEXT: movk [[REG]], #4059 ; LARGE-NEXT: fmov s0, [[REG]] define float @check_float2() { @@ -46,7 +46,7 @@ define float @check_float2() { } ; LARGE-LABEL: check_double2 -; LARGE: movz [[REG:x[0-9]+]], #16393, lsl #48 +; LARGE: mov [[REG:x[0-9]+]], #4614219293217783808 ; LARGE-NEXT: movk [[REG]], #8699, lsl #32 ; LARGE-NEXT: movk [[REG]], #21572, lsl #16 ; LARGE-NEXT: movk [[REG]], #11544 @@ -54,4 +54,3 @@ define float @check_float2() { define double @check_double2() { ret double 3.1415926535897931159979634685441851615905761718750 } - diff --git a/test/CodeGen/AArch64/func-calls.ll b/test/CodeGen/AArch64/func-calls.ll index 9be66b603b3..40ed607b06c 100644 --- a/test/CodeGen/AArch64/func-calls.ll +++ b/test/CodeGen/AArch64/func-calls.ll @@ -104,10 +104,10 @@ define void @check_stack_args() { float -2.0, float -8.0, float 16.0, float 1.0, float 64.0) -; CHECK: movz [[SIXTY_FOUR:w[0-9]+]], #17024, lsl #16 +; CHECK: mov [[SIXTY_FOUR:w[0-9]+]], #1115684864 ; CHECK: str [[SIXTY_FOUR]], [sp] -; CHECK-NONEON: movz [[SIXTY_FOUR:w[0-9]+]], #17024, lsl #16 +; CHECK-NONEON: mov [[SIXTY_FOUR:w[0-9]+]], #1115684864 ; CHECK-NONEON: str [[SIXTY_FOUR]], [sp] ; CHECK: bl stacked_fpu @@ -139,9 +139,9 @@ define void @check_i128_align() { call void @check_i128_regalign(i32 0, i128 42) ; CHECK-NOT: mov x1 -; CHECK-LE: movz x2, #{{0x2a|42}} +; CHECK-LE: mov x2, #{{0x2a|42}} ; CHECK-LE: mov x3, xzr -; CHECK-BE: movz {{x|w}}3, #{{0x2a|42}} +; CHECK-BE: mov {{x|w}}3, #{{0x2a|42}} ; CHECK-BE: mov x2, xzr ; CHECK: bl check_i128_regalign diff --git a/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll b/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll index 369e0029485..ca24fc9c880 100644 --- a/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll +++ b/test/CodeGen/AArch64/inlineasm-ldr-pseudo.ll @@ -5,7 +5,7 @@ ; RUN: llc -mtriple=aarch64 < %s -filetype=obj | llvm-objdump -arch=aarch64 -d - | FileCheck %s ; CHECK-LABEL: foo: -; CHECK: a0 79 95 d2 movz x0, #43981 +; CHECK: a0 79 95 d2 mov x0, #43981 ; CHECK: c0 03 5f d6 ret define i32 @foo() nounwind { entry: @@ -22,5 +22,3 @@ entry: %0 = tail call i32 asm sideeffect "ldr $0,=0x10001", "=r"() nounwind ret i32 %0 } - - diff --git a/test/CodeGen/AArch64/logical-imm.ll b/test/CodeGen/AArch64/logical-imm.ll index a5e4a9956de..6f562230d93 100644 --- 
a/test/CodeGen/AArch64/logical-imm.ll +++ b/test/CodeGen/AArch64/logical-imm.ll @@ -73,11 +73,11 @@ define void @test_mov(i32 %in32, i64 %in64) { ; CHECK-LABEL: test_mov: %val0 = add i32 %in32, 2863311530 store i32 %val0, i32* @var32 -; CHECK: orr {{w[0-9]+}}, wzr, #0xaaaaaaaa +; CHECK: mov {{w[0-9]+}}, #-1431655766 %val1 = add i64 %in64, 11068046444225730969 store i64 %val1, i64* @var64 -; CHECK: orr {{x[0-9]+}}, xzr, #0x9999999999999999 +; CHECK: mov {{x[0-9]+}}, #-7378697629483820647 ret void ; CHECK: ret diff --git a/test/CodeGen/AArch64/movw-consts.ll b/test/CodeGen/AArch64/movw-consts.ll index 277c932fcd3..def6072e0bc 100644 --- a/test/CodeGen/AArch64/movw-consts.ll +++ b/test/CodeGen/AArch64/movw-consts.ll @@ -53,19 +53,19 @@ define i64 @test7() { ; couldn't. Useful even for i64 define i64 @test8() { ; CHECK-LABEL: test8: -; CHECK: movn w0, #{{60875|0xedcb}} +; CHECK: mov w0, #-60876 ret i64 4294906420 } define i64 @test9() { ; CHECK-LABEL: test9: -; CHECK: movn x0, #0 +; CHECK: mov x0, #-1 ret i64 -1 } define i64 @test10() { ; CHECK-LABEL: test10: -; CHECK: movn x0, #{{60875|0xedcb}}, lsl #16 +; CHECK: mov x0, #-3989504001 ret i64 18446744069720047615 } @@ -110,7 +110,7 @@ define void @test15() { define void @test16() { ; CHECK-LABEL: test16: -; CHECK: movn {{w[0-9]+}}, #0 +; CHECK: mov {{w[0-9]+}}, #-1 store i32 -1, i32* @var32 ret void } diff --git a/test/CodeGen/AArch64/sibling-call.ll b/test/CodeGen/AArch64/sibling-call.ll index a68fdec4cfb..925d1881f56 100644 --- a/test/CodeGen/AArch64/sibling-call.ll +++ b/test/CodeGen/AArch64/sibling-call.ll @@ -92,6 +92,6 @@ define void @indirect_tail() { tail call void %fptr(i32 42) ret void ; CHECK: ldr [[FPTR:x[1-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:func] -; CHECK: movz w0, #{{42|0x2a}} +; CHECK: mov w0, #{{42|0x2a}} ; CHECK: br [[FPTR]] } diff --git a/test/DebugInfo/AArch64/constant-dbgloc.ll b/test/DebugInfo/AArch64/constant-dbgloc.ll index 5b5cd7ed08e..a3dbf453edb 100644 --- a/test/DebugInfo/AArch64/constant-dbgloc.ll +++ b/test/DebugInfo/AArch64/constant-dbgloc.ll @@ -10,7 +10,7 @@ target triple = "aarch64--linux-gnueabihf" ; } ; CHECK: test.c:4:5 -; CHECK: movn +; CHECK: mov ; Function Attrs: nounwind define i32 @main() !dbg !4 { diff --git a/test/MC/AArch64/arm64-aliases.s b/test/MC/AArch64/arm64-aliases.s index 1d7c48ca9b1..3ace7a0f718 100644 --- a/test/MC/AArch64/arm64-aliases.s +++ b/test/MC/AArch64/arm64-aliases.s @@ -134,18 +134,101 @@ foo: mov x0, #281470681743360 mov x0, #18446744073709486080 -; CHECK: movz x0, #0xffff, lsl #32 -; CHECK: movn x0, #0xffff +; CHECK: mov x0, #0xffff00000000 +; CHECK: mov x0, #-0x10000 mov w0, #0xffffffff mov w0, #0xffffff00 mov wzr, #0xffffffff mov wzr, #0xffffff00 -; CHECK: movn w0, #0 -; CHECK: movn w0, #0xff -; CHECK: movn wzr, #0 -; CHECK: movn wzr, #0xff +; CHECK: mov w0, #-0x1 +; CHECK: mov w0, #-0x100 +; CHECK: mov wzr, #-0x1 +; CHECK: mov wzr, #-0x100 + + ; 0 can be encoded by MOVZ in multiple ways, only "lsl #0" is a MOV alias. + movz x0, #0 + movz x0, #0, lsl #16 + movz x0, #0, lsl #32 + movz x0, #0, lsl #48 + movz w0, #0 + movz w0, #0, lsl #16 +; CHECK: mov x0, #0x0 +; CHECK: movz x0, #0x0, lsl #16 +; CHECK: movz x0, #0x0, lsl #32 +; CHECK: movz x0, #0x0, lsl #48 +; CHECK: mov w0, #0x0 +; CHECK: movz w0, #0x0, lsl #16 + + ; Similarly to MOVZ, -1 can be encoded in multiple ways, only one of which is + ; "MOV". 
+ movn x0, #0 + movn x0, #0, lsl #16 + movn x0, #0, lsl #32 + movn x0, #0, lsl #48 + movn w0, #0 + movn w0, #0, lsl #16 +; CHECK: mov x0, #-0x1 +; CHECK: movn x0, #0x0, lsl #16 +; CHECK: movn x0, #0x0, lsl #32 +; CHECK: movn x0, #0x0, lsl #48 +; CHECK: mov w0, #-0x1 +; CHECK: movn w0, #0x0, lsl #16 + + ; Two 32-bit immediates are encodable by both MOVN and MOVZ, make sure the MOV + ; corresponds to the MOVZ version. + movz w0, #0xffff + movz w0, #0xffff, lsl #16 + movn w0, #0xffff + movn w0, #0xffff, lsl #16 +; CHECK: mov w0, #0xffff +; CHECK: mov w0, #-0x10000 +; CHECK: movn w0, #0xffff +; CHECK: movn w0, #0xffff, lsl #16 + + orr x20, xzr, #0xaaaaaaaaaaaaaaaa + orr w15, wzr, #0xaaaaaaaa +; CHECK: mov x20, #-0x5555555555555556 +; CHECK: mov w15, #-0x55555556 + + ; ORR is mostly repeating bit sequences and cannot encode -1, so it only + ; overlaps with MOVZ or MOVN if the repeat-width is the whole register. In + ; both cases MOVZ/MOVN are preferred. + orr x3, xzr, #0x1 + orr w3, wzr, #0x1 + orr x3, xzr, #0x10000 + orr w3, wzr, #0x10000 + orr x3, xzr, #0x700000000 + orr x3, xzr, #0x3000000000000 +; CHECK: orr x3, xzr, #0x1 +; CHECK: orr w3, wzr, #0x1 +; CHECK: orr x3, xzr, #0x10000 +; CHECK: orr w3, wzr, #0x10000 +; CHECK: orr x3, xzr, #0x700000000 +; CHECK: orr x3, xzr, #0x3000000000000 + + + orr x5, xzr, #0xfffffffffffffff0 + orr w2, wzr, #0xfffffffe + orr x5, xzr, #0xfffffffffcffffff + orr w2, wzr, #0xf0ffffff + orr x5, xzr, #0xffffff00ffffffff + orr x5, xzr, #0x8000ffffffffffff +; CHECK: orr x5, xzr, #0xfffffffffffffff0 +; CHECK: orr w2, wzr, #0xfffffffe +; CHECK: orr x5, xzr, #0x8000ffffffffffff + + ; 0xffff is interesting because there are exceptions in the MOVN rules for + ; it. Make sure we don't accidentally fall down any of those holes. + orr w3, wzr, #0xffff0000 + orr w3, wzr, #0xffff + orr x3, xzr, #0xffff000000000000 + orr x5, xzr, #0x0000ffffffffffff +; CHECK: orr w3, wzr, #0xffff0000 +; CHECK: orr w3, wzr, #0xffff +; CHECK: orr x3, xzr, #0xffff000000000000 +; CHECK: orr x5, xzr, #0xffffffffffff ;----------------------------------------------------------------------------- ; MVN aliases diff --git a/test/MC/AArch64/arm64-arithmetic-encoding.s b/test/MC/AArch64/arm64-arithmetic-encoding.s index 63532e6b798..7b090692de3 100644 --- a/test/MC/AArch64/arm64-arithmetic-encoding.s +++ b/test/MC/AArch64/arm64-arithmetic-encoding.s @@ -494,20 +494,20 @@ foo: movz w0, #1, lsl #16 movz x0, #1, lsl #16 -; CHECK: movz w0, #1 ; encoding: [0x20,0x00,0x80,0x52] -; CHECK: movz x0, #1 ; encoding: [0x20,0x00,0x80,0xd2] -; CHECK: movz w0, #1, lsl #16 ; encoding: [0x20,0x00,0xa0,0x52] -; CHECK: movz x0, #1, lsl #16 ; encoding: [0x20,0x00,0xa0,0xd2] +; CHECK: mov w0, #1 ; encoding: [0x20,0x00,0x80,0x52] +; CHECK: mov x0, #1 ; encoding: [0x20,0x00,0x80,0xd2] +; CHECK: mov w0, #65536 ; encoding: [0x20,0x00,0xa0,0x52] +; CHECK: mov x0, #65536 ; encoding: [0x20,0x00,0xa0,0xd2] movn w0, #2 movn x0, #2 movn w0, #2, lsl #16 movn x0, #2, lsl #16 -; CHECK: movn w0, #2 ; encoding: [0x40,0x00,0x80,0x12] -; CHECK: movn x0, #2 ; encoding: [0x40,0x00,0x80,0x92] -; CHECK: movn w0, #2, lsl #16 ; encoding: [0x40,0x00,0xa0,0x12] -; CHECK: movn x0, #2, lsl #16 ; encoding: [0x40,0x00,0xa0,0x92] +; CHECK: mov w0, #-3 ; encoding: [0x40,0x00,0x80,0x12] +; CHECK: mov x0, #-3 ; encoding: [0x40,0x00,0x80,0x92] +; CHECK: mov w0, #-131073 ; encoding: [0x40,0x00,0xa0,0x12] +; CHECK: mov x0, #-131073 ; encoding: [0x40,0x00,0xa0,0x92] movk w0, #1 movk x0, #1 diff --git a/test/MC/AArch64/basic-a64-instructions.s 
b/test/MC/AArch64/basic-a64-instructions.s index f8e49432145..69229848fde 100644 --- a/test/MC/AArch64/basic-a64-instructions.s +++ b/test/MC/AArch64/basic-a64-instructions.s @@ -3267,8 +3267,8 @@ _func: mov w3, #0xf000f mov x10, #0xaaaaaaaaaaaaaaaa -// CHECK: orr w3, wzr, #0xf000f // encoding: [0xe3,0x8f,0x00,0x32] -// CHECK: orr x10, xzr, #0xaaaaaaaaaaaaaaaa // encoding: [0xea,0xf3,0x01,0xb2] +// CHECK: mov w3, #983055 // encoding: [0xe3,0x8f,0x00,0x32] +// CHECK: mov x10, #-6148914691236517206 // encoding: [0xea,0xf3,0x01,0xb2] // The Imm field of logicalImm operations has to be truncated to the // register width, i.e. 32 bits @@ -3355,13 +3355,13 @@ _func: movz w1, #65535, lsl #0 movz w2, #0, lsl #16 movn w2, #1234, lsl #0 -// CHECK: movz w1, #{{65535|0xffff}} // encoding: [0xe1,0xff,0x9f,0x52] +// CHECK: mov w1, #65535 // encoding: [0xe1,0xff,0x9f,0x52] // CHECK: movz w2, #0, lsl #16 // encoding: [0x02,0x00,0xa0,0x52] -// CHECK: movn w2, #{{1234|0x4d2}} // encoding: [0x42,0x9a,0x80,0x12] +// CHECK: mov w2, #-1235 // encoding: [0x42,0x9a,0x80,0x12] movz x2, #1234, lsl #32 movk xzr, #4321, lsl #48 -// CHECK: movz x2, #{{1234|0x4d2}}, lsl #32 // encoding: [0x42,0x9a,0xc0,0xd2] +// CHECK: mov x2, #5299989643264 // encoding: [0x42,0x9a,0xc0,0xd2] // CHECK: movk xzr, #{{4321|0x10e1}}, lsl #48 // encoding: [0x3f,0x1c,0xe2,0xf2] movz x2, #:abs_g0:sym diff --git a/test/MC/AArch64/ldr-pseudo.s b/test/MC/AArch64/ldr-pseudo.s index 6a437f3cb36..6d2bbe8db16 100644 --- a/test/MC/AArch64/ldr-pseudo.s +++ b/test/MC/AArch64/ldr-pseudo.s @@ -8,17 +8,17 @@ // CHECK-LABEL: f1: f1: ldr x0, =0x1234 -// CHECK: movz x0, #0x1234 +// CHECK: mov x0, #0x1234 ldr w1, =0x4567 -// CHECK: movz w1, #0x4567 +// CHECK: mov w1, #0x4567 ldr x0, =0x12340000 -// CHECK: movz x0, #0x1234, lsl #16 +// CHECK: mov x0, #0x12340000 ldr w1, =0x45670000 -// CHECK: movz w1, #0x4567, lsl #16 +// CHECK: mov w1, #0x45670000 ldr x0, =0xabc00000000 -// CHECK: movz x0, #0xabc, lsl #32 +// CHECK: mov x0, #0xabc00000000 ldr x0, =0xbeef000000000000 -// CHECK: movz x0, #0xbeef, lsl #48 +// CHECK: mov x0, #-0x4111000000000000 .section b,"ax",@progbits // CHECK-LABEL: f3: @@ -128,7 +128,7 @@ f13: adds x0, x0, #1 adds x0, x0, #1 ldr w0, =0x101 -// CHECK: movz w0, #0x101 +// CHECK: mov w0, #0x101 adds x0, x0, #1 adds x0, x0, #1 ldr w0, =bar diff --git a/test/MC/AArch64/single-slash.s b/test/MC/AArch64/single-slash.s index c5a443001a7..83e87bc95d2 100644 --- a/test/MC/AArch64/single-slash.s +++ b/test/MC/AArch64/single-slash.s @@ -2,5 +2,5 @@ // Test that a single slash is not mistaken as the start of comment. 
-//CHECK: movz x0, #16 +//CHECK: mov x0, #16 movz x0, #(32 / 2) diff --git a/test/MC/Disassembler/AArch64/arm64-arithmetic.txt b/test/MC/Disassembler/AArch64/arm64-arithmetic.txt index 6ba474ff007..95b44858e84 100644 --- a/test/MC/Disassembler/AArch64/arm64-arithmetic.txt +++ b/test/MC/Disassembler/AArch64/arm64-arithmetic.txt @@ -452,20 +452,20 @@ 0x20 0x00 0xa0 0x52 0x20 0x00 0xa0 0xd2 -# CHECK: movz w0, #1 -# CHECK: movz x0, #1 -# CHECK: movz w0, #1, lsl #16 -# CHECK: movz x0, #1, lsl #16 +# CHECK: mov w0, #1 +# CHECK: mov x0, #1 +# CHECK: mov w0, #65536 +# CHECK: mov x0, #65536 0x40 0x00 0x80 0x12 0x40 0x00 0x80 0x92 0x40 0x00 0xa0 0x12 0x40 0x00 0xa0 0x92 -# CHECK: movn w0, #2 -# CHECK: movn x0, #2 -# CHECK: movn w0, #2, lsl #16 -# CHECK: movn x0, #2, lsl #16 +# CHECK: mov w0, #-3 +# CHECK: mov x0, #-3 +# CHECK: mov w0, #-131073 +# CHECK: mov x0, #-131073 0x20 0x00 0x80 0x72 0x20 0x00 0x80 0xf2 diff --git a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt index 185f0c1124a..9d6723a96e4 100644 --- a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt +++ b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt @@ -2907,8 +2907,8 @@ 0x7f 0xf0 0x1 0xf2 0xff 0xf3 0x0 0xf2 -# CHECK: orr w3, wzr, #0xf000f -# CHECK: orr x10, xzr, #0xaaaaaaaaaaaaaaaa +# CHECK: mov w3, #983055 +# CHECK: mov x10, #-6148914691236517206 0xe3 0x8f 0x0 0x32 0xea 0xf3 0x1 0xb2 @@ -2991,19 +2991,19 @@ # limitation in InstAlias. Lots of the "mov[nz]" instructions should # be "mov". -# CHECK: movz w1, #{{65535|0xffff}} +# CHECK: mov w1, #{{65535|0xffff}} # CHECK: movz w2, #0, lsl #16 -# CHECK: movn w2, #{{1234|0x4d2}} +# CHECK: mov w2, #-1235 0xe1 0xff 0x9f 0x52 0x2 0x0 0xa0 0x52 0x42 0x9a 0x80 0x12 -# CHECK: movz x2, #{{1234|0x4d2}}, lsl #32 +# CHECK: mov x2, #5299989643264 # CHECK: movk xzr, #{{4321|0x10e1}}, lsl #48 0x42 0x9a 0xc0 0xd2 0x3f 0x1c 0xe2 0xf2 -# CHECK: movz x2, #0 +# CHECK: mov x2, #0 # CHECK: movk w3, #0 # CHECK: movz x4, #0, lsl #16 # CHECK: movk w5, #0, lsl #16
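For reference, a standalone sketch (not part of the patch itself): it lifts the AArch64_AM helpers added to AArch64AddressingModes.h above into a tiny self-contained C++ program, so the "MOVZ lsl #0 > MOVZ lsl #N > MOVN" ordering that decides which immediates get the MOV spelling can be exercised directly. The helper bodies follow the hunk; the main() driver and its expected results are illustrative only.

// Standalone sketch (not part of the patch): the AArch64_AM helpers from the
// AArch64AddressingModes.h hunk, plus an illustrative driver.
#include <cstdint>
#include <cstdio>

// MOVZ is printable as "mov Rd, #Value" when Value fits entirely in the one
// shifted 16-bit field the instruction encodes.
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) {
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
  if (Value == 0 && Shift != 0)
    return false;

  return (Value & ~(0xffffULL << Shift)) == 0;
}

// True if *some* MOVZ shift could encode Value.
static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) {
  for (int Shift = 0; Shift <= RegWidth - 16; Shift += 16)
    if ((Value & ~(0xffffULL << Shift)) == 0)
      return true;
  return false;
}

// MOVN only gets the MOV spelling when no MOVZ form could encode the value.
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) {
  if (isAnyMOVZMovAlias(Value, RegWidth))
    return false;

  Value = ~Value;
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  return isMOVZMovAlias(Value, Shift, RegWidth);
}

int main() {
  // "movz x0, #5, lsl #48": the whole value sits in one field, so it prints
  // as "mov x0, #1407374883553280" (see movz_3movk in arm64-movi.ll).
  printf("%d\n", isMOVZMovAlias(0x0005000000000000ULL, 48, 64)); // 1

  // "movn x0, #41" materialises -42 (~41); no MOVZ can encode it, so it
  // prints as "mov x0, #-42" (see @movn in arm64-movi.ll).
  printf("%d\n", isMOVNMovAlias(~UINT64_C(41), 0, 64)); // 1

  // "movn w0, #0xffff" materialises 0xffff0000, which MOVZ lsl #16 already
  // covers, so MOVZ wins the alias and this instruction keeps its "movn"
  // spelling (see arm64-aliases.s).
  printf("%d\n", isMOVNMovAlias(0xffff0000ULL, 0, 32)); // 0

  return 0;
}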