+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
;
; Check that x86's peephole optimization doesn't fold a 64-bit load (movsd) into
; addpd.
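; Folding here would be incorrect because a folded form (e.g. "addpd (%rsp), %xmm1")
; reads a full 16 bytes from memory, while the original movsd loads only 8 bytes and
; zeroes the upper lane, so the fold could change the upper element and read past the
; intended object.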
declare void @foo3(%struct.S1*)
-; CHECK: movsd {{[0-9]*}}(%rsp), [[R0:%xmm[0-9]+]]
-; CHECK: addpd [[R0]], %xmm{{[0-9]+}}
-
-define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) {
+define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) nounwind {
+; CHECK-LABEL: foo1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    movq %rsp, %rdi
+; CHECK-NEXT:    callq foo3
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movapd {{.*#+}} xmm1 = <1.0E+0,u>
+; CHECK-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    addpd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, {{.*}}(%rip)
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    retq
  %1 = alloca <2 x double>, align 16
  %tmpcast = bitcast <2 x double>* %1 to %struct.S1*
  call void @foo3(%struct.S1* %tmpcast) #2