WIP: Added SSE2 versions of vp9_mb_lpf_horizontal_edge_w
author     Scott LaVarnway <slavarnway@google.com>
           Sat, 12 Jan 2013 01:11:04 +0000 (17:11 -0800)
committer  Scott LaVarnway <slavarnway@google.com>
           Sat, 12 Jan 2013 01:11:04 +0000 (17:11 -0800)
and vp9_mb_lpf_vertical_edge_w_sse2.  These were done quickly so we can
run some tests over the weekend.  Future commits will optimize/refactor these
functions further.

Decoder performance improved by ~17% for the clip used.
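
For reference, below is a rough scalar sketch of the 15-tap wide filter that
the new SSE2 code vectorizes: each output is the sum of its 15 neighbors
(with p7/q7 replicated past the ends) plus the center pixel again, rounded
and shifted by 4.  The standalone helper and its name are illustrative only
(mask selection is omitted), not part of this change:

    /* Illustrative scalar equivalent of one column of the wide filter.
       s points at q0, p is the stride; pix[0..15] holds p7..q7. */
    static void wide_flat_filter_col(unsigned char *s, int p) {
      int pix[16], out[14], j, t;
      for (j = 0; j < 16; j++)
        pix[j] = s[(j - 8) * p];                /* pix[0] = p7 ... pix[15] = q7 */
      for (j = 1; j <= 14; j++) {               /* outputs op6 .. oq6 */
        int sum = pix[j] + 8;                   /* center tap again + rounding */
        for (t = j - 7; t <= j + 7; t++) {
          int k = t < 0 ? 0 : (t > 15 ? 15 : t);  /* replicate p7/q7 at ends */
          sum += pix[k];
        }
        out[j - 1] = sum >> 4;
      }
      for (j = 1; j <= 14; j++)
        s[(j - 8) * p] = (unsigned char)out[j - 1];
    }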

Change-Id: I612687cd5a7670ee840a0cbc3c68dc2b84d4af76

vp9/common/x86/vp9_loopfilter_x86.c

index 9f46eec4c63cc27a54a1bab72f2a1dc578f6e0a0..3cf10ba205deae2c78f858e23a71a767ec82d9c0 100644
--- a/vp9/common/x86/vp9_loopfilter_x86.c
+++ b/vp9/common/x86/vp9_loopfilter_x86.c
@@ -85,6 +85,482 @@ void vp9_loop_filter_bvs_mmx(unsigned char *y_ptr, int y_stride,
 #endif
 
 #if HAVE_SSE2
+
+#if CONFIG_WIDERLPF
+void vp9_mb_lpf_horizontal_edge_w_sse2(unsigned char *s,
+                                       int p,
+                                       const unsigned char *_blimit,
+                                       const unsigned char *_limit,
+                                       const unsigned char *_thresh) {
+  DECLARE_ALIGNED(16, unsigned char, flat2_op[7][16]);
+  DECLARE_ALIGNED(16, unsigned char, flat2_oq[7][16]);
+
+  DECLARE_ALIGNED(16, unsigned char, flat_op2[16]);
+  DECLARE_ALIGNED(16, unsigned char, flat_op1[16]);
+  DECLARE_ALIGNED(16, unsigned char, flat_op0[16]);
+  DECLARE_ALIGNED(16, unsigned char, flat_oq0[16]);
+  DECLARE_ALIGNED(16, unsigned char, flat_oq1[16]);
+  DECLARE_ALIGNED(16, unsigned char, flat_oq2[16]);
+  __m128i mask, hev, flat, flat2;
+  const __m128i zero = _mm_set1_epi16(0);
+  __m128i p7, p6, p5;
+  __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;
+  __m128i q5, q6, q7;
+  int i = 0;
+  const unsigned int extended_thresh = _thresh[0] * 0x01010101u;
+  const unsigned int extended_limit  = _limit[0]  * 0x01010101u;
+  const unsigned int extended_blimit = _blimit[0] * 0x01010101u;
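+  // Broadcast each single-byte threshold to all 16 lanes of an xmm register.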
+  const __m128i thresh =
+      _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_thresh), 0);
+  const __m128i limit =
+      _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_limit), 0);
+  const __m128i blimit =
+      _mm_shuffle_epi32(_mm_cvtsi32_si128((int)extended_blimit), 0);
+
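+  // Load the five rows above the edge (p4..p0) and the five rows below
+  // (q0..q4), 16 pixels wide.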
+  p4 = _mm_loadu_si128((__m128i *)(s - 5 * p));
+  p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
+  p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
+  p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
+  p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
+  q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
+  q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
+  q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
+  q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
+  q4 = _mm_loadu_si128((__m128i *)(s + 4 * p));
+  {
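+    // Compute the filter mask, the hev (high edge variance) mask, and the
+    // flat mask that selects the 7-tap filter; each is a per-byte 0/0xff mask.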
+    const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
+                                          _mm_subs_epu8(p0, p1));
+    const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
+                                          _mm_subs_epu8(q0, q1));
+    const __m128i one = _mm_set1_epi8(1);
+    const __m128i fe = _mm_set1_epi8(0xfe);
+    const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
+    __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
+                                    _mm_subs_epu8(q0, p0));
+    __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1),
+                                    _mm_subs_epu8(q1, p1));
+    __m128i work;
+    flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
+    hev = _mm_subs_epu8(flat, thresh);
+    hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
+
+    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
+    abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
+    mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
+    mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
+    // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
+    mask = _mm_max_epu8(flat, mask);
+    // mask |= (abs(p1 - p0) > limit) * -1;
+    // mask |= (abs(q1 - q0) > limit) * -1;
+    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1),
+                                     _mm_subs_epu8(p1, p2)),
+                        _mm_or_si128(_mm_subs_epu8(p3, p2),
+                                     _mm_subs_epu8(p2, p3)));
+    mask = _mm_max_epu8(work, mask);
+    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1),
+                                     _mm_subs_epu8(q1, q2)),
+                        _mm_or_si128(_mm_subs_epu8(q3, q2),
+                                     _mm_subs_epu8(q2, q3)));
+    mask = _mm_max_epu8(work, mask);
+    mask = _mm_subs_epu8(mask, limit);
+    mask = _mm_cmpeq_epi8(mask, zero);
+
+    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0),
+                                     _mm_subs_epu8(p0, p2)),
+                        _mm_or_si128(_mm_subs_epu8(q2, q0),
+                                     _mm_subs_epu8(q0, q2)));
+    flat = _mm_max_epu8(work, flat);
+    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p3, p0),
+                                     _mm_subs_epu8(p0, p3)),
+                        _mm_or_si128(_mm_subs_epu8(q3, q0),
+                                     _mm_subs_epu8(q0, q3)));
+    flat = _mm_max_epu8(work, flat);
+    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p4, p0),
+                                     _mm_subs_epu8(p0, p4)),
+                        _mm_or_si128(_mm_subs_epu8(q4, q0),
+                                     _mm_subs_epu8(q0, q4)));
+    flat = _mm_max_epu8(work, flat);
+    flat = _mm_subs_epu8(flat, one);
+    flat = _mm_cmpeq_epi8(flat, zero);
+    flat = _mm_and_si128(flat, mask);
+  }
+
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // calculate flat2
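+  // Note: the p4..p1 and q1..q4 registers are reused here to hold the outer
+  // pixels p7..p4 and q4..q7; p0 and q0 are still live from the block above.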
+  p4 = _mm_loadu_si128((__m128i *)(s - 8 * p));
+  p3 = _mm_loadu_si128((__m128i *)(s - 7 * p));
+  p2 = _mm_loadu_si128((__m128i *)(s - 6 * p));
+  p1 = _mm_loadu_si128((__m128i *)(s - 5 * p));
+//  p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
+//  q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
+  q1 = _mm_loadu_si128((__m128i *)(s + 4 * p));
+  q2 = _mm_loadu_si128((__m128i *)(s + 5 * p));
+  q3 = _mm_loadu_si128((__m128i *)(s + 6 * p));
+  q4 = _mm_loadu_si128((__m128i *)(s + 7 * p));
+
+  {
+    const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),
+                                          _mm_subs_epu8(p0, p1));
+    const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
+                                          _mm_subs_epu8(q0, q1));
+    const __m128i one = _mm_set1_epi8(1);
+    __m128i work;
+    flat2 = _mm_max_epu8(abs_p1p0, abs_q1q0);
+    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0),
+                                     _mm_subs_epu8(p0, p2)),
+                        _mm_or_si128(_mm_subs_epu8(q2, q0),
+                                     _mm_subs_epu8(q0, q2)));
+    flat2 = _mm_max_epu8(work, flat2);
+    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p3, p0),
+                                     _mm_subs_epu8(p0, p3)),
+                        _mm_or_si128(_mm_subs_epu8(q3, q0),
+                                     _mm_subs_epu8(q0, q3)));
+    flat2 = _mm_max_epu8(work, flat2);
+    work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p4, p0),
+                                     _mm_subs_epu8(p0, p4)),
+                        _mm_or_si128(_mm_subs_epu8(q4, q0),
+                                     _mm_subs_epu8(q0, q4)));
+    flat2 = _mm_max_epu8(work, flat2);
+    flat2 = _mm_subs_epu8(flat2, one);
+    flat2 = _mm_cmpeq_epi8(flat2, zero);
+    flat2 = _mm_and_si128(flat2, flat);  // flat2 & flat & mask
+  }
+  // calculate flat2
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  {
+    const __m128i four = _mm_set1_epi16(4);
+    unsigned char *src = s;
+    i = 0;
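+    // 7-tap flat filter, 8 columns per pass: widen to 16 bits and keep a
+    // running sum, dropping one tap and adding one tap between outputs.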
+    do {
+      __m128i workp_a, workp_b, workp_shft;
+      p4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 5 * p)), zero);
+      p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * p)), zero);
+      p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * p)), zero);
+      p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * p)), zero);
+      p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * p)), zero);
+      q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * p)), zero);
+      q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * p)), zero);
+      q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * p)), zero);
+      q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * p)), zero);
+      q4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 4 * p)), zero);
+
+      workp_a = _mm_add_epi16(_mm_add_epi16(p4, p3), _mm_add_epi16(p2, p1));
+      workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0);
+      workp_b = _mm_add_epi16(_mm_add_epi16(q0, p2), p4);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+      _mm_storel_epi64((__m128i *)&flat_op2[i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_b = _mm_add_epi16(_mm_add_epi16(q0, q1), p1);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+      _mm_storel_epi64((__m128i *)&flat_op1[i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p4), q2);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p1), p0);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+      _mm_storel_epi64((__m128i *)&flat_op0[i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q3);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p0), q0);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+      _mm_storel_epi64((__m128i *)&flat_oq0[i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p2), q4);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q0), q1);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+      _mm_storel_epi64((__m128i *)&flat_oq1[i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p1), q4);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q1), q2);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
+      _mm_storel_epi64((__m128i *)&flat_oq2[i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      src += 8;
+    } while (++i < 2);
+  }
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // wide flat
+  // TODO(slavarnway): interleave with the flat pixel calculations (see above)
+  {
+    const __m128i eight = _mm_set1_epi16(8);
+    unsigned char *src = s;
+    int i = 0;
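+    // 15-tap wide filter over p7..q7, 8 columns per pass in 16-bit precision;
+    // each output adjusts the previous running sum before the final >> 4.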
+    do {
+      __m128i workp_a, workp_b, workp_shft;
+      p7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 8 * p)), zero);
+      p6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 7 * p)), zero);
+      p5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 6 * p)), zero);
+      p4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 5 * p)), zero);
+      p3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 4 * p)), zero);
+      p2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 3 * p)), zero);
+      p1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 2 * p)), zero);
+      p0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 1 * p)), zero);
+      q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src - 0 * p)), zero);
+      q1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 1 * p)), zero);
+      q2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 2 * p)), zero);
+      q3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 3 * p)), zero);
+      q4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 4 * p)), zero);
+      q5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 5 * p)), zero);
+      q6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 6 * p)), zero);
+      q7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(src + 7 * p)), zero);
+
+      workp_a = _mm_sub_epi16(_mm_slli_epi16(p7, 3), p7);  // p7 * 7
+      workp_a = _mm_add_epi16(_mm_slli_epi16(p6, 1), workp_a);
+      workp_b = _mm_add_epi16(_mm_add_epi16(p5, p4), _mm_add_epi16(p3, p2));
+      workp_a = _mm_add_epi16(_mm_add_epi16(p1, p0), workp_a);
+      workp_b = _mm_add_epi16(_mm_add_epi16(q0, eight), workp_b);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_op[6][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p7), p5);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p6), q1);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_op[5][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p7), p4);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p5), q2);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_op[4][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p7), p3);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p4), q3);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_op[3][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p7), p2);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p3), q4);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_op[2][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p7), p1);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p2), q5);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_op[1][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p7), p0);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p1), q6);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_op[0][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p7), q0);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p0), q7);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_oq[0][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p6), q1);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q0), q7);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_oq[1][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p5), q2);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q1), q7);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_oq[2][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p4), q3);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q2), q7);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_oq[3][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q4);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q3), q7);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_oq[4][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p2), q5);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q4), q7);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_oq[5][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p1), q6);
+      workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q5), q7);
+      workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 4);
+      _mm_storel_epi64((__m128i *)&flat2_oq[6][i*8],
+                       _mm_packus_epi16(workp_shft, workp_shft));
+
+      src += 8;
+    } while (++i < 2);
+  }
+  // wide flat
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  // lp filter
+  {
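+    // Standard 4-tap filter: xor with 0x80 biases the pixels so unsigned
+    // bytes can be processed with signed saturating arithmetic.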
+    const __m128i t4 = _mm_set1_epi8(4);
+    const __m128i t3 = _mm_set1_epi8(3);
+    const __m128i t80 = _mm_set1_epi8(0x80);
+    const __m128i te0 = _mm_set1_epi8(0xe0);
+    const __m128i t1f = _mm_set1_epi8(0x1f);
+    const __m128i t1 = _mm_set1_epi8(0x1);
+    const __m128i t7f = _mm_set1_epi8(0x7f);
+
+    __m128i ps1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80);
+    __m128i ps0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80);
+    __m128i qs0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80);
+    __m128i qs1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80);
+    __m128i filt;
+    __m128i work_a;
+    __m128i filter1, filter2;
+
+    filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev);
+    work_a = _mm_subs_epi8(qs0, ps0);
+    filt = _mm_adds_epi8(filt, work_a);
+    filt = _mm_adds_epi8(filt, work_a);
+    filt = _mm_adds_epi8(filt, work_a);
+    /* (vp9_filter + 3 * (qs0 - ps0)) & mask */
+    filt = _mm_and_si128(filt, mask);
+
+    filter1 = _mm_adds_epi8(filt, t4);
+    filter2 = _mm_adds_epi8(filt, t3);
+
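+    /* SSE2 has no 8-bit arithmetic shift, so the shifts below operate on
+       16-bit lanes and then repair the sign bits of negative bytes. */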
+    /* Filter1 >> 3 */
+    work_a = _mm_cmpgt_epi8(zero, filter1);
+    filter1 = _mm_srli_epi16(filter1, 3);
+    work_a = _mm_and_si128(work_a, te0);
+    filter1 = _mm_and_si128(filter1, t1f);
+    filter1 = _mm_or_si128(filter1, work_a);
+
+    /* Filter2 >> 3 */
+    work_a = _mm_cmpgt_epi8(zero, filter2);
+    filter2 = _mm_srli_epi16(filter2, 3);
+    work_a = _mm_and_si128(work_a, te0);
+    filter2 = _mm_and_si128(filter2, t1f);
+    filter2 = _mm_or_si128(filter2, work_a);
+
+    /* filt >> 1 */
+    filt = _mm_adds_epi8(filter1, t1);
+    work_a = _mm_cmpgt_epi8(zero, filt);
+    filt = _mm_srli_epi16(filt, 1);
+    work_a = _mm_and_si128(work_a, t80);
+    filt = _mm_and_si128(filt, t7f);
+    filt = _mm_or_si128(filt, work_a);
+
+    filt = _mm_andnot_si128(hev, filt);
+
+    ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
+    ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
+    qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80);
+    qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80);
+
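+    // Select each output pixel with and/andnot/or blends: flat2 picks the
+    // wide-filter result, otherwise flat picks the 7-tap result, otherwise
+    // the 4-tap (or unfiltered) value is kept.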
+    // write out op6 - op3
+    {
+      unsigned char *dst = (s - 7 * p);
+      for (i = 6; i > 2; i--) {
+        __m128i flat2_output;
+        work_a = _mm_loadu_si128((__m128i *)dst);
+        flat2_output = _mm_load_si128((__m128i *)flat2_op[i]);
+        work_a = _mm_andnot_si128(flat2, work_a);
+        flat2_output = _mm_and_si128(flat2, flat2_output);
+        work_a = _mm_or_si128(work_a, flat2_output);
+        _mm_storeu_si128((__m128i *)dst, work_a);
+        dst += p;
+      }
+    }
+
+    work_a = _mm_loadu_si128((__m128i *)(s - 3 * p));
+    p2 = _mm_load_si128((__m128i *)flat_op2);
+    work_a = _mm_andnot_si128(flat, work_a);
+    p2 = _mm_and_si128(flat, p2);
+    work_a = _mm_or_si128(work_a, p2);
+    p2 = _mm_load_si128((__m128i *)flat2_op[2]);
+    work_a = _mm_andnot_si128(flat2, work_a);
+    p2 = _mm_and_si128(flat2, p2);
+    p2 = _mm_or_si128(work_a, p2);
+    _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
+
+    p1 = _mm_load_si128((__m128i *)flat_op1);
+    work_a = _mm_andnot_si128(flat, ps1);
+    p1 = _mm_and_si128(flat, p1);
+    work_a = _mm_or_si128(work_a, p1);
+    p1 = _mm_load_si128((__m128i *)flat2_op[1]);
+    work_a = _mm_andnot_si128(flat2, work_a);
+    p1 = _mm_and_si128(flat2, p1);
+    p1 = _mm_or_si128(work_a, p1);
+    _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
+
+    p0 = _mm_load_si128((__m128i *)flat_op0);
+    work_a = _mm_andnot_si128(flat, ps0);
+    p0 = _mm_and_si128(flat, p0);
+    work_a = _mm_or_si128(work_a, p0);
+    p0 = _mm_load_si128((__m128i *)flat2_op[0]);
+    work_a = _mm_andnot_si128(flat2, work_a);
+    p0 = _mm_and_si128(flat2, p0);
+    p0 = _mm_or_si128(work_a, p0);
+    _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
+
+    q0 = _mm_load_si128((__m128i *)flat_oq0);
+    work_a = _mm_andnot_si128(flat, qs0);
+    q0 = _mm_and_si128(flat, q0);
+    work_a = _mm_or_si128(work_a, q0);
+    q0 = _mm_load_si128((__m128i *)flat2_oq[0]);
+    work_a = _mm_andnot_si128(flat2, work_a);
+    q0 = _mm_and_si128(flat2, q0);
+    q0 = _mm_or_si128(work_a, q0);
+    _mm_storeu_si128((__m128i *)(s - 0 * p), q0);
+
+    q1 = _mm_load_si128((__m128i *)flat_oq1);
+    work_a = _mm_andnot_si128(flat, qs1);
+    q1 = _mm_and_si128(flat, q1);
+    work_a = _mm_or_si128(work_a, q1);
+    q1 = _mm_load_si128((__m128i *)flat2_oq[1]);
+    work_a = _mm_andnot_si128(flat2, work_a);
+    q1 = _mm_and_si128(flat2, q1);
+    q1 = _mm_or_si128(work_a, q1);
+    _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
+
+    work_a = _mm_loadu_si128((__m128i *)(s + 2 * p));
+    q2 = _mm_load_si128((__m128i *)flat_oq2);
+    work_a = _mm_andnot_si128(flat, work_a);
+    q2 = _mm_and_si128(flat, q2);
+    work_a = _mm_or_si128(work_a, q2);
+    q2 = _mm_load_si128((__m128i *)flat2_oq[2]);
+    work_a = _mm_andnot_si128(flat2, work_a);
+    q2 = _mm_and_si128(flat2, q2);
+    q2 = _mm_or_si128(work_a, q2);
+    _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
+
+    // write out oq3 - oq6
+    {
+      unsigned char *dst = (s + 3 * p);
+      for (i = 3; i < 7; i++) {
+        __m128i flat2_output;
+        work_a = _mm_loadu_si128((__m128i *)dst);
+        flat2_output = _mm_load_si128((__m128i *)flat2_oq[i]);
+        work_a = _mm_andnot_si128(flat2, work_a);
+        flat2_output = _mm_and_si128(flat2, flat2_output);
+        work_a = _mm_or_si128(work_a, flat2_output);
+        _mm_storeu_si128((__m128i *)dst, work_a);
+        dst += p;
+      }
+    }
+  }
+}
+#endif
+
 void vp9_mbloop_filter_horizontal_edge_sse2(unsigned char *s,
                                             int p,
                                             const unsigned char *_blimit,
@@ -562,6 +1038,39 @@ void vp9_mbloop_filter_vertical_edge_sse2(unsigned char *s,
   transpose(src, 16, dst, p, 2);
 }
 
+#if CONFIG_WIDERLPF
+void vp9_mb_lpf_vertical_edge_w_sse2(unsigned char *s,
+                                     int p,
+                                     const unsigned char *blimit,
+                                     const unsigned char *limit,
+                                     const unsigned char *thresh) {
+  DECLARE_ALIGNED_ARRAY(16, unsigned char, t_dst, 256);
+  unsigned char *src[4];
+  unsigned char *dst[4];
+
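+  /* Filter the vertical edge by transposing the 16x16 neighborhood, running
+     the horizontal wide filter on the copy, and transposing back. */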
+  /* Transpose 16x16 */
+  transpose8x16(s - 8, s - 8 + p * 8, p, t_dst, 16);
+  transpose8x16(s, s + p * 8, p, t_dst + 16 * 8, 16);
+
+  /* Loop filtering */
+  vp9_mb_lpf_horizontal_edge_w_sse2(t_dst + 8 * 16, 16, blimit, limit,
+                                    thresh);
+
+  src[0] = t_dst;
+  src[1] = t_dst + 8 * 16;
+  src[2] = t_dst + 8;
+  src[3] = t_dst + 8 * 16 + 8;
+
+  dst[0] = s - 8;
+  dst[1] = s - 8 + 8;
+  dst[2] = s - 8 + p * 8;
+  dst[3] = s - 8 + p * 8 + 8;
+
+  /* Transpose 16x16 */
+  transpose(src, 16, dst, p, 4);
+}
+#endif
+
 void vp9_mbloop_filter_vertical_edge_uv_sse2(unsigned char *u,
                                              int p,
                                              const unsigned char *blimit,
@@ -608,8 +1117,8 @@ void vp9_loop_filter_mbh_sse2(unsigned char *y_ptr,
 void vp9_lpf_mbh_w_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
                            unsigned char *v_ptr, int y_stride, int uv_stride,
                            struct loop_filter_info *lfi) {
-  vp9_mb_lpf_horizontal_edge_w(y_ptr, y_stride,
-                                      lfi->mblim, lfi->lim, lfi->hev_thr, 2);
+  vp9_mb_lpf_horizontal_edge_w_sse2(y_ptr, y_stride,
+                                    lfi->mblim, lfi->lim, lfi->hev_thr);
 
   /* u,v */
   if (u_ptr)
@@ -642,8 +1151,8 @@ void vp9_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
 void vp9_lpf_mbv_w_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
                    unsigned char *v_ptr, int y_stride, int uv_stride,
                    struct loop_filter_info *lfi) {
-  vp9_mb_lpf_vertical_edge_w(y_ptr, y_stride,
-                                    lfi->mblim, lfi->lim, lfi->hev_thr, 2);
+  vp9_mb_lpf_vertical_edge_w_sse2(y_ptr, y_stride,
+                                  lfi->mblim, lfi->lim, lfi->hev_thr);
 
   /* u,v */
   if (u_ptr)