From 01e47fd2c1f4d15d4d91a22d851c71a658473382 Mon Sep 17 00:00:00 2001
From: Joerg Sonnenberger
Date: Sat, 27 Jul 2019 18:57:59 +0000
Subject: [PATCH] Stricter check for the memory access.

The current pattern would trigger for scheduling changes of the
post-load computation, since those are commutable with the inline asm.
Avoid this by explicitly checking the order of the load vs. the asm
block.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@367180 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/inlineasm-sched-bug.ll | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/test/CodeGen/X86/inlineasm-sched-bug.ll b/test/CodeGen/X86/inlineasm-sched-bug.ll
index 25bf5e07ce7..b8934969655 100644
--- a/test/CodeGen/X86/inlineasm-sched-bug.ll
+++ b/test/CodeGen/X86/inlineasm-sched-bug.ll
@@ -1,7 +1,9 @@
 ; PR13504
 ; RUN: llc -mtriple=i686-- -mcpu=atom < %s | FileCheck %s
+; Check that treemap is read before the asm statement.
+; CHECK: movl 8(%{{esp|ebp}})
 ; CHECK: bsfl
-; CHECK-NOT: movl
+; CHECK-NOT: movl 8(%{{esp|ebp}})
 
 define i32 @foo(i32 %treemap) nounwind uwtable {
 entry:
-- 
2.49.0
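
Note: to illustrate why the stricter pattern matters, here is a
hypothetical Atom code sequence for @foo (a sketch, not actual llc
output; the register choices and the post-asm movl are made up):

	movl	8(%esp), %eax	# the %treemap load; matched by the new
				# leading CHECK, so it must precede the asm
	#APP
	bsfl	%eax, %eax	# the inline asm block
	#NO_APP
	movl	%eax, %edx	# hypothetical post-load computation; the
				# scheduler may legally place this movl
				# after the asm, and the old bare
				# 'CHECK-NOT: movl' would then fail

Anchoring both directives on the distinctive movl 8(%{{esp|ebp}})
operand ties the pattern to the %treemap load itself, so unrelated
movl instructions scheduled across the asm block no longer break
the test.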