Use aligned copy in 8x8 Hadamard transform SSE2
author Jingning Han <jingning@google.com>
Tue, 31 Mar 2015 17:08:29 +0000 (10:08 -0700)
committer Gerrit Code Review <gerrit@gerrit.golo.chromium.org>
Tue, 31 Mar 2015 17:21:52 +0000 (10:21 -0700)
This reduces the 8x8 Hadamard transform cycle count by 20%. The aligned
store requires the coeff output buffer to be 16-byte aligned.

Change-Id: If34c5e02f3afa42244c6efabe121f7cf5d2df41b
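
Unlike _mm_storeu_si128, _mm_store_si128 faults if its destination is not
16-byte aligned, so this change presumes callers pass a 16-byte-aligned
coeff buffer. Below is a minimal sketch of that contract, assuming
libvpx's DECLARE_ALIGNED macro and the prototype visible in the hunk
header; the caller name is illustrative, not an actual call site:

    #include <emmintrin.h>      /* SSE2 intrinsics */
    #include <stdint.h>
    #include "vpx_ports/mem.h"  /* DECLARE_ALIGNED */

    void vp9_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride,
                               int16_t *coeff);

    static void hadamard_example(const int16_t *src_diff, int src_stride) {
      /* 16-byte alignment makes the aligned stores inside
         vp9_hadamard_8x8_sse2 legal; an unaligned buffer would raise a
         general-protection fault. */
      DECLARE_ALIGNED(16, int16_t, coeff[64]);  /* 8x8 output block */
      vp9_hadamard_8x8_sse2(src_diff, src_stride, coeff);
    }

On older microarchitectures, movdqu (the unaligned form) carried a
penalty even when the address happened to be aligned, which is where the
cycle saving comes from.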

vp9/encoder/x86/vp9_avg_intrin_sse2.c

index f2c7c645f35888b3f5f1e1b9d7eaa27ca634afd8..ecd6ce9a2ec93b64056b496cef45606fd22f2043 100644 (file)
@@ -148,21 +148,21 @@ void vp9_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride,
   hadamard_col8_sse2(src, 0);
   hadamard_col8_sse2(src, 1);
 
-  _mm_storeu_si128((__m128i *)coeff, src[0]);
+  _mm_store_si128((__m128i *)coeff, src[0]);
   coeff += 8;
-  _mm_storeu_si128((__m128i *)coeff, src[1]);
+  _mm_store_si128((__m128i *)coeff, src[1]);
   coeff += 8;
-  _mm_storeu_si128((__m128i *)coeff, src[2]);
+  _mm_store_si128((__m128i *)coeff, src[2]);
   coeff += 8;
-  _mm_storeu_si128((__m128i *)coeff, src[3]);
+  _mm_store_si128((__m128i *)coeff, src[3]);
   coeff += 8;
-  _mm_storeu_si128((__m128i *)coeff, src[4]);
+  _mm_store_si128((__m128i *)coeff, src[4]);
   coeff += 8;
-  _mm_storeu_si128((__m128i *)coeff, src[5]);
+  _mm_store_si128((__m128i *)coeff, src[5]);
   coeff += 8;
-  _mm_storeu_si128((__m128i *)coeff, src[6]);
+  _mm_store_si128((__m128i *)coeff, src[6]);
   coeff += 8;
-  _mm_storeu_si128((__m128i *)coeff, src[7]);
+  _mm_store_si128((__m128i *)coeff, src[7]);
 }
 
 void vp9_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride,