Summary:
Because GCC reserves $1 (the assembler temporary, $at) and does not use it for code generation, inline assembly code can use $1 without having to add it to the clobbers list.
LLVM, on the other hand, treats $1 as an ordinary allocatable register, and this can cause conflicts with inline assembly that assumes GCC-like code generation.
A solution to this problem is to make Clang automatically clobber $1 for all MIPS inline assembly.
This is not the optimal solution, but it seems like a necessary compromise for now.
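
For illustration, here is a hypothetical user function (not part of this patch) that uses $1 directly and, following GCC's convention, declares no clobber for it:

    int add_via_at(int a, int b) {
      int result;
      __asm__ __volatile__(
          ".set noat\n\t"       /* we manage $1 ($at) ourselves */
          "move $1, %1\n\t"     /* stash 'a' in $1; no clobber declared */
          "addu %0, $1, %2\n\t" /* result = a + b, read back through $1 */
          ".set at\n\t"
          : "=r"(result)
          : "r"(a), "r"(b));    /* note: "$1" absent from the clobbers */
      return result;
    }

Under GCC this is safe because $1 never holds a live value across the statement; under LLVM, $1 may have been allocated to %1, %2, or a value live across the asm, which this code would silently corrupt.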
Reviewers: dsanders
Reviewed By: dsanders
Subscribers: cfe-commits
Differential Revision: http://reviews.llvm.org/D6638
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@224428 91177308-0d34-0410-b5e6-96231b3b80d8
}
const char *getClobbers() const override {
- // FIXME: Implement!
- return "";
+ return "~{$1}";
}
bool handleTargetFeatures(std::vector<std::string> &Features,
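
Before this change, code that wanted to be safe under Clang had to spell the clobber out by hand; returning "~{$1}" from getClobbers() makes the explicit clobber in this hypothetical snippet implicit for every MIPS asm statement (assuming "$1" is accepted as a clobber name, as it is by GCC on MIPS):

    void touch_at(int x) {
      /* The explicit "$1" clobber below is what getClobbers() now adds
         automatically. */
      __asm__ __volatile__(".set noat\n\tmove $1, %0\n\t.set at"
                           :
                           : "r"(x)
                           : "$1");
    }
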
// 'c': 16-bit address register for Mips16, GPR for all others
// We use 'c' to constrain both the target and one of the source
// registers. This test checks only for syntactic correctness.
- // CHECK: %{{[0-9]+}} = call i32 asm sideeffect "addi $0,$1,$2 \0A\09\09", "=c,c,I"(i32 %{{[0-9]+}}, i32 %{{[0-9]+}}) [[NUW:#[0-9]+]], !srcloc !{{[0-9]+}}
+ // CHECK: %{{[0-9]+}} = call i32 asm sideeffect "addi $0,$1,$2 \0A\09\09", "=c,c,I,~{$1}"(i32 %{{[0-9]+}}, i32 %{{[0-9]+}}) [[NUW:#[0-9]+]], !srcloc !{{[0-9]+}}
int __s, __v = 17;
int __t;
__asm__ __volatile__(
// 'l': lo register
// We make it clear that the destination register is lo with the
// use of the 'l' constraint ("=l").
- // CHECK: %{{[0-9]+}} = call i32 asm sideeffect "mtlo $1 \0A\09\09", "=l,r,~{lo}"(i32 %{{[0-9]+}}) [[NUW]], !srcloc !{{[0-9]+}}
+ // CHECK: %{{[0-9]+}} = call i32 asm sideeffect "mtlo $1 \0A\09\09", "=l,r,~{lo},~{$1}"(i32 %{{[0-9]+}}) [[NUW]], !srcloc !{{[0-9]+}}
int i_temp = 44;
int i_result;
__asm__ __volatile__(
// 'x': Combined lo/hi registers
// We specify that the destination registers are the hi/lo pair with the
// use of the 'x' constraint ("=x").
- // CHECK: %{{[0-9]+}} = call i64 asm sideeffect "mthi $1 \0A\09\09mtlo $2 \0A\09\09", "=x,r,r"(i32 %{{[0-9]+}}, i32 %{{[0-9]+}}) [[NUW]], !srcloc !{{[0-9]+}}
+ // CHECK: %{{[0-9]+}} = call i64 asm sideeffect "mthi $1 \0A\09\09mtlo $2 \0A\09\09", "=x,r,r,~{$1}"(i32 %{{[0-9]+}}, i32 %{{[0-9]+}}) [[NUW]], !srcloc !{{[0-9]+}}
int i_hi = 3;
int i_lo = 2;
long long ll_result = 0;
// 'R': An address that can be used in a non-macro load or store
// This test will result in the higher and lower nibbles being
// switched due to the lwl/lwr instruction pairs.
- // CHECK: %{{[0-9]+}} = call i32 asm sideeffect "lwl $0, 1 + $1\0A\09lwr $0, 2 + $1\0A\09", "=r,*R"(i32* %{{[0-9,a-f]+}}) #1,
+ // CHECK: %{{[0-9]+}} = call i32 asm sideeffect "lwl $0, 1 + $1\0A\09lwr $0, 2 + $1\0A\09", "=r,*R,~{$1}"(i32* %{{[0-9,a-f]+}}) #1,
int c = 0xffbbccdd;
typedef int v4i32 __attribute__((vector_size(16)));
- // CHECK: %{{[0-9]+}} = call i32 asm ".set noreorder;\0Alw $0,$1;\0A.set reorder;\0A", "=r,*m"(i32* getelementptr inbounds ([8 x i32]* @b, i32 {{[0-9]+}}, i32 {{[0-9]+}})) #2,
- // CHECK: %{{[0-9]+}} = call i32 asm "lw $0,${1:D};\0A", "=r,*m"(i32* getelementptr inbounds ([8 x i32]* @b, i32 {{[0-9]+}}, i32 {{[0-9]+}})) #2,
- // CHECK: %{{[0-9]+}} = call <4 x i32> asm "ldi.w ${0:w},1", "=f"
+ // CHECK: %{{[0-9]+}} = call i32 asm ".set noreorder;\0Alw $0,$1;\0A.set reorder;\0A", "=r,*m,~{$1}"(i32* getelementptr inbounds ([8 x i32]* @b, i32 {{[0-9]+}}, i32 {{[0-9]+}})) #2,
+ // CHECK: %{{[0-9]+}} = call i32 asm "lw $0,${1:D};\0A", "=r,*m,~{$1}"(i32* getelementptr inbounds ([8 x i32]* @b, i32 {{[0-9]+}}, i32 {{[0-9]+}})) #2,
+ // CHECK: %{{[0-9]+}} = call <4 x i32> asm "ldi.w ${0:w},1", "=f,~{$1}"
int b[8] = {0,1,2,3,4,5,6,7};
int main()
{
// CHECK: @single_m
void single_m()
{
- // CHECK: call void asm "foo $1,$0", "=*m,*m[[CLOBBERS:[a-zA-Z0-9@%{},~_ ]*\"]](i32* {{[a-zA-Z0-9@%]+}}, i32* {{[a-zA-Z0-9@%]+}})
+ // CHECK: call void asm "foo $1,$0", "=*m,*m[[CLOBBERS:[a-zA-Z0-9@%{},~_$ ]*\"]](i32* {{[a-zA-Z0-9@%]+}}, i32* {{[a-zA-Z0-9@%]+}})
asm("foo %1,%0" : "=m" (mout0) : "m" (min1));
}