--- /dev/null
+; RUN: llc -mtriple=i686-unknown-linux-gnu -enable-misched=false < %s | FileCheck %s
+
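+; Check that the i32 argument is spilled and reloaded through the full 4-byte
+; slot, and that the i8 subtract reads the low byte of the reloaded register
+; rather than a folded memory operand.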
+define fastcc i8 @fold32to8(i32 %add, i8 %spill) {
+; CHECK-LABEL: fold32to8:
+; CHECK: movl %ecx, (%esp) # 4-byte Spill
+; CHECK: movl (%esp), %eax # 4-byte Reload
+; CHECK: subb %al, %dl
+entry:
+ tail call void asm sideeffect "", "~{eax},~{ebx},~{ecx},~{edi},~{esi},~{ebp},~{dirflag},~{fpsr},~{flags}"()
+ %trunc = trunc i32 %add to i8
+ %sub = sub i8 %spill, %trunc
+ ret i8 %sub
+}
+
+; Do not fold a 1-byte store into a 4-byte spill slot
+define fastcc i8 @nofold(i32 %add, i8 %spill) {
+; CHECK-LABEL: nofold:
+; CHECK: movl %edx, (%esp) # 4-byte Spill
+; CHECK: movl (%esp), %eax # 4-byte Reload
+; CHECK: subb %cl, %al
+entry:
+ tail call void asm sideeffect "", "~{eax},~{ebx},~{edx},~{edi},~{esi},~{ebp},~{dirflag},~{fpsr},~{flags}"()
+ %trunc = trunc i32 %add to i8
+ %sub = sub i8 %spill, %trunc
+ ret i8 %sub
+}
--- /dev/null
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -enable-misched=false < %s | FileCheck %s
+
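+; Check that the i64 argument is spilled and reloaded through the full 8-byte
+; slot, and that the i32 subtract reads the low half of the reloaded register
+; rather than a folded memory operand.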
+define i32 @fold64to32(i64 %add, i32 %spill) {
+; CHECK-LABEL: fold64to32:
+; CHECK: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; CHECK: subl %eax, %esi
+entry:
+ tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
+ %trunc = trunc i64 %add to i32
+ %sub = sub i32 %spill, %trunc
+ ret i32 %sub
+}
+
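+; Same as above, but truncating to i8: the 8-byte reload is kept and the
+; subtract reads %al.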
+define i8 @fold64to8(i64 %add, i8 %spill) {
+; CHECK-LABEL: fold64to8:
+; CHECK: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; CHECK: subb %al, %sil
+entry:
+ tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
+ %trunc = trunc i64 %add to i8
+ %sub = sub i8 %spill, %trunc
+ ret i8 %sub
+}
+
+; Do not fold a 4-byte store into an 8-byte spill slot
+define i32 @nofold(i64 %add, i64 %spill) {
+; CHECK-LABEL: nofold:
+; CHECK: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; CHECK: subl %edi, %eax
+; CHECK: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+entry:
+ tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
+ %trunc = trunc i64 %add to i32
+ %truncspill = trunc i64 %spill to i32
+ %sub = sub i32 %truncspill, %trunc
+ tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
+ ret i32 %sub
+}