push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 2
- SAVE_XMM
+ SAVE_XMM 6
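+ ; xmm7 is replaced by xmm6 below, so only xmm6 needs to be preserved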
push rsi
push rdi
; end prolog
movdqa xmm4, xmm0
punpcklqdq xmm0, xmm3 ;d1 a1
punpckhqdq xmm4, xmm3 ;c1 b1
- movd xmm7, eax
+ movd xmm6, eax
movdqa xmm1, xmm4 ;c1 b1
paddw xmm4, xmm0 ;d1+c1 a1+b1 aka op[4] op[0]
pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
movdqa xmm3, xmm4 ;ip[4] ip[0]
- pshufd xmm7, xmm7, 0 ;03 03 03 03 03 03 03 03
+ pshufd xmm6, xmm6, 0 ;03 03 03 03 03 03 03 03
paddw xmm4, xmm2 ;ip[4]+ip[8] ip[0]+ip[12] aka b1 a1
psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d1
punpcklwd xmm5, xmm0 ; 31 21 11 01 30 20 10 00
punpckhwd xmm1, xmm0 ; 33 23 13 03 32 22 12 02
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- paddw xmm5, xmm7
- paddw xmm1, xmm7
+ paddw xmm5, xmm6
+ paddw xmm1, xmm6
psraw xmm5, 3
psraw xmm1, 3
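+ ; together with the paddw of the broadcast 3 above, this computes
+ ; (x + 3) >> 3 on each result word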
movsxd rdx, dword ptr arg(3) ; ref_stride
%else
%ifidn __OUTPUT_FORMAT__,x64
+ SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
%define ref_ptr r8
%define ref_stride r9
%define end_ptr r10
%define ret_var r11
- %define result_ptr [rsp+40+4*8]
- %define max_err [rsp+40+4*8]
- SAVE_XMM
+ %define result_ptr [rsp+xmm_stack_space+8+4*8]
+ %define max_err [rsp+xmm_stack_space+8+4*8]
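+ ; e.g. with SAVE_XMM 7, u above, xmm_stack_space = (7-5)*16 = 32, so this
+ ; matches the old hard-coded 40+4*8 offset (the remaining +8 covers an
+ ; 8 byte push made later in this frame)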
%else
%define src_ptr rdi
%define src_stride rsi
xchg rbx, rax
%else
%ifidn __OUTPUT_FORMAT__,x64
+ SAVE_XMM 7, u
%define src_ptr rcx
%define src_stride rdx
%define r0_ptr rsi
%define r1_ptr r10
%define r2_ptr r11
%define r3_ptr r8
%define ref_stride r9
- %define result_ptr [rsp+48+4*8]
- SAVE_XMM
+ %define result_ptr [rsp+xmm_stack_space+16+4*8]
push rsi
LOAD_X4_ADDRESSES r8, r0_ptr, r1_ptr, r2_ptr, r3_ptr
%define UNSHADOW_ARGS mov rsp, rbp
%endif
-; must keep XMM6:XMM15 (libvpx uses XMM6 and XMM7) on Win64 ABI
-; rsp register has to be aligned
+; Win64 ABI requires that XMM6:XMM15 are callee saved
+; SAVE_XMM n, [u]
+; store registers 6-n on the stack
+; if u is specified, use unaligned movs.
+; Win64 ABI requires 16 byte stack alignment, but then pushes an 8 byte return
+; value. Typically we follow this up with 'push rbp' - re-aligning the stack -
+; but in some cases this is not done and unaligned movs must be used.
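+; A usage sketch (illustrative only, not part of this change): after
+; 'push rbp' the stack is 16 byte aligned again, so the default aligned
+; stores are safe:
+;   push rbp
+;   mov rbp, rsp
+;   SAVE_XMM 7          ; movdqa stores for xmm6 and xmm7
+;   ...
+;   RESTORE_XMM
+;   pop rbp
+; when nothing re-aligns the stack, pass u to get movdqu:
+;   SAVE_XMM 7, u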
%ifidn __OUTPUT_FORMAT__,x64
-%macro SAVE_XMM 0
- sub rsp, 32
- movdqu XMMWORD PTR [rsp], xmm6
- movdqu XMMWORD PTR [rsp+16], xmm7
+%macro SAVE_XMM 1-2 a
+ %if %1 < 6
+ %error Only xmm registers 6-15 need to be preserved
+ %else
+ %assign last_xmm %1
+ %define movxmm movdq %+ %2
+ %assign xmm_stack_space ((last_xmm - 5) * 16)
+ sub rsp, xmm_stack_space
+ %assign i 6
+ %rep (last_xmm - 5)
+ movxmm [rsp + ((i - 6) * 16)], xmm %+ i
+ %assign i i+1
+ %endrep
+ %endif
%endmacro
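+; for reference, 'SAVE_XMM 7, u' should expand to:
+;   sub rsp, 32
+;   movdqu [rsp + 0], xmm6
+;   movdqu [rsp + 16], xmm7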
%macro RESTORE_XMM 0
- movdqu xmm6, XMMWORD PTR [rsp]
- movdqu xmm7, XMMWORD PTR [rsp+16]
- add rsp, 32
+ %ifndef last_xmm
+ %error RESTORE_XMM must be paired with SAVE_XMM n
+ %else
+ %assign i last_xmm
+ %rep (last_xmm - 5)
+ movxmm xmm %+ i, [rsp + ((i - 6) * 16)]
+ %assign i i-1
+ %endrep
+ add rsp, xmm_stack_space
+ ; there are a couple of functions which return from multiple places.
+ ; otherwise, we could uncomment these:
+ ; %undef last_xmm
+ ; %undef xmm_stack_space
+ ; %undef movxmm
+ %endif
%endmacro
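+; and with last_xmm = 7 and movxmm = movdqu, RESTORE_XMM should expand to:
+;   movdqu xmm7, [rsp + 16]
+;   movdqu xmm6, [rsp + 0]
+;   add rsp, 32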
%else
-%macro SAVE_XMM 0
+%macro SAVE_XMM 1-2
%endmacro
%macro RESTORE_XMM 0
%endmacro