granicus.if.org Git - libvpx/commitdiff
mips dsp-ase r2 vp9 decoder bilinear convolve optimizations
authorParag Salasakar <img.mips1@gmail.com>
Wed, 9 Oct 2013 12:35:27 +0000 (18:05 +0530)
committerParag Salasakar <img.mips1@gmail.com>
Wed, 9 Oct 2013 12:35:27 +0000 (18:05 +0530)
Change-Id: Ic31b4ef85e65070b4f8b9f26e068ccfaae00c4f0

12 files changed:
vp9/common/mips/dspr2/vp9_common_dspr2.h
vp9/common/mips/dspr2/vp9_convolve2_avg_dspr2.c [new file with mode: 0644]
vp9/common/mips/dspr2/vp9_convolve2_avg_horiz_dspr2.c [new file with mode: 0644]
vp9/common/mips/dspr2/vp9_convolve2_dspr2.c [new file with mode: 0644]
vp9/common/mips/dspr2/vp9_convolve2_horiz_dspr2.c [new file with mode: 0644]
vp9/common/mips/dspr2/vp9_convolve2_vert_dspr2.c [new file with mode: 0644]
vp9/common/mips/dspr2/vp9_convolve8_avg_dspr2.c
vp9/common/mips/dspr2/vp9_convolve8_avg_horiz_dspr2.c
vp9/common/mips/dspr2/vp9_convolve8_dspr2.c
vp9/common/mips/dspr2/vp9_convolve8_horiz_dspr2.c
vp9/common/mips/dspr2/vp9_convolve8_vert_dspr2.c
vp9/vp9_common.mk

index d2fa4c1dc2c9c163de83eb92bab3f21509283955..dc88f160330308239c6fbc24e83dab02cd66a30b 100644 (file)
@@ -81,5 +81,34 @@ static INLINE void vp9_prefetch_store_streamed(unsigned char *dst) {
   );
 }
 
+/* Bilinear (2-tap) convolve entry points for the MIPS DSPr2 extension.
+ * They mirror the existing 8-tap vp9_convolve8_*_dspr2 interfaces; the
+ * implementations use only two filter taps (loaded from filter[3..4] --
+ * see the new vp9_convolve2_*_dspr2.c files in this commit). */
+void vp9_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                               uint8_t *dst, ptrdiff_t dst_stride,
+                               const int16_t *filter_x, int x_step_q4,
+                               const int16_t *filter_y, int y_step_q4,
+                               int w, int h);
+
+void vp9_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                   uint8_t *dst, ptrdiff_t dst_stride,
+                                   const int16_t *filter_x, int x_step_q4,
+                                   const int16_t *filter_y, int y_step_q4,
+                                   int w, int h);
+
+void vp9_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                  uint8_t *dst, ptrdiff_t dst_stride,
+                                  const int16_t *filter_x, int x_step_q4,
+                                  const int16_t *filter_y, int y_step_q4,
+                                  int w, int h);
+
+/* Note: takes a single filter (no x/y split, no step parameters) --
+ * this variant filters both directions with the same 2-tap kernel. */
+void vp9_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                         uint8_t *dst, ptrdiff_t dst_stride,
+                         const int16_t *filter,
+                         int w, int h);
+
+void vp9_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                              uint8_t *dst, ptrdiff_t dst_stride,
+                              const int16_t *filter_x, int x_step_q4,
+                              const int16_t *filter_y, int y_step_q4,
+                              int w, int h);
+
+
 #endif  // #if HAVE_DSPR2
 #endif  // VP9_COMMON_VP9_COMMON_DSPR2_H_
diff --git a/vp9/common/mips/dspr2/vp9_convolve2_avg_dspr2.c b/vp9/common/mips/dspr2/vp9_convolve2_avg_dspr2.c
new file mode 100644 (file)
index 0000000..91d62bc
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_convolve.h"
+#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
+
+#if HAVE_DSPR2
+/* Vertical bilinear (2-tap) convolution, rounding-averaged with the pixels
+ * already in dst (addqh_r.w).  Processes 4 output pixels per inner-loop
+ * iteration, so w must be a multiple of 4 (the dispatcher passes
+ * 4/8/16/32).  The two taps are read from filter_y[3] and filter_y[4] as
+ * one packed 32-bit word for the dpa.w.ph dual dot-product, and results
+ * are clamped to [0,255] via the vp9_ff_cropTbl lookup. */
+static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src,
+                                         int32_t src_stride,
+                                         uint8_t *dst,
+                                         int32_t dst_stride,
+                                         const int16_t *filter_y,
+                                         int32_t w,
+                                         int32_t h) {
+  int32_t       x, y;
+  const uint8_t *src_ptr;
+  uint8_t       *dst_ptr;
+  uint8_t       *cm = vp9_ff_cropTbl;  /* clamping lookup table */
+  uint32_t      vector4a = 64;         /* rounding value preloaded into each accumulator */
+  uint32_t      load1, load2;
+  uint32_t      p1, p2;
+  uint32_t      scratch1, scratch2;
+  uint32_t      store1, store2;
+  int32_t       Temp1, Temp2;
+  const int16_t *filter = &filter_y[3];
+  uint32_t      filter45;
+
+  /* Load taps filter_y[3..4] as one 32-bit word (two packed int16s).
+   * NOTE(review): the int16_t* -> int32_t* cast assumes the filter table
+   * is 4-byte aligned -- confirm against the filter table definition. */
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    vp9_prefetch_store(dst + dst_stride);
+
+    for (x = 0; x < w; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
+
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [p1] "=&r" (p1), [p2] "=&r" (p2),
+            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+            [src_stride] "r" (src_stride), [cm] "r" (cm),
+            [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+/* Vertical bilinear (2-tap) convolution with rounding-average against the
+ * existing dst pixels, specialized for w == 64.  Same inner loop as
+ * convolve_bi_avg_vert_4_dspr2, but with a fixed width and an extra
+ * prefetch covering the second 32-byte half of each destination row. */
+static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
+                                          int32_t src_stride,
+                                          uint8_t *dst,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_y,
+                                          int32_t h) {
+  int32_t       x, y;
+  const uint8_t *src_ptr;
+  uint8_t       *dst_ptr;
+  uint8_t       *cm = vp9_ff_cropTbl;  /* clamping lookup table */
+  uint32_t      vector4a = 64;         /* rounding value preloaded into each accumulator */
+  uint32_t      load1, load2;
+  uint32_t      p1, p2;
+  uint32_t      scratch1, scratch2;
+  uint32_t      store1, store2;
+  int32_t       Temp1, Temp2;
+  const int16_t *filter = &filter_y[3];
+  uint32_t      filter45;
+
+  /* Load taps filter_y[3..4] as one 32-bit word (two packed int16s). */
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    vp9_prefetch_store(dst + dst_stride);
+    vp9_prefetch_store(dst + dst_stride + 32);
+
+    for (x = 0; x < 64; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
+
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [p1] "=&r" (p1), [p2] "=&r" (p2),
+            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+            [src_stride] "r" (src_stride), [cm] "r" (cm),
+            [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+/* Public 2-tap vertical convolve-and-average entry point.  The DSPr2 fast
+ * paths are used only when the vertical phase step is unscaled
+ * (y_step_q4 == 16) and the width is one of the block sizes handled by
+ * the helpers; any other case falls back to the portable C version.
+ * filter_x and x_step_q4 are forwarded only on the fallback path. */
+void vp9_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                  uint8_t *dst, ptrdiff_t dst_stride,
+                                  const int16_t *filter_x, int x_step_q4,
+                                  const int16_t *filter_y, int y_step_q4,
+                                  int w, int h) {
+  if (16 == y_step_q4) {
+    uint32_t pos = 38;
+
+    /* bit position for extract from acc */
+    __asm__ __volatile__ (
+      "wrdsp      %[pos],     1           \n\t"
+      :
+      : [pos] "r" (pos)
+    );
+
+    vp9_prefetch_store(dst);
+
+    switch (w) {
+      case 4:
+      case 8:
+      case 16:
+      case 32:
+        convolve_bi_avg_vert_4_dspr2(src, src_stride,
+                                     dst, dst_stride,
+                                     filter_y, w, h);
+        break;
+      case 64:
+        vp9_prefetch_store(dst + 32);
+        convolve_bi_avg_vert_64_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_y, h);
+        break;
+      default:
+        vp9_convolve8_avg_vert_c(src, src_stride,
+                                 dst, dst_stride,
+                                 filter_x, x_step_q4,
+                                 filter_y, y_step_q4,
+                                 w, h);
+        break;
+    }
+  } else {
+    vp9_convolve8_avg_vert_c(src, src_stride,
+                             dst, dst_stride,
+                             filter_x, x_step_q4,
+                             filter_y, y_step_q4,
+                             w, h);
+  }
+}
+#endif
diff --git a/vp9/common/mips/dspr2/vp9_convolve2_avg_horiz_dspr2.c b/vp9/common/mips/dspr2/vp9_convolve2_avg_horiz_dspr2.c
new file mode 100644 (file)
index 0000000..148b20f
--- /dev/null
@@ -0,0 +1,833 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_convolve.h"
+#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
+
+#if HAVE_DSPR2
+/* Horizontal bilinear (2-tap) convolution for 4-pixel-wide blocks,
+ * rounding-averaged with the pixels already in dst.  Computes the two
+ * even and two odd output pixels of one row per iteration, interleaving
+ * dst loads/stores with the dot-products.  Taps come from filter_x0[3..4]
+ * packed into one 32-bit word; results are clamped via vp9_ff_cropTbl. */
+static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src,
+                                          int32_t src_stride,
+                                          uint8_t *dst,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_x0,
+                                          int32_t h) {
+  int32_t y;
+  uint8_t *cm = vp9_ff_cropTbl;  /* clamping lookup table */
+  int32_t  Temp1, Temp2, Temp3, Temp4;
+  uint32_t vector4a = 64;        /* rounding value preloaded into each accumulator */
+  uint32_t tp1, tp2;
+  uint32_t p1, p2, p3;
+  uint32_t tn1, tn2;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t      filter45;
+
+  /* Load taps filter_x0[3..4] as one 32-bit word (two packed int16s). */
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src + src_stride);
+    vp9_prefetch_load(src + src_stride + 32);
+    vp9_prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                      \n\t"
+        "ulw              %[tp2],         4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        "lbu              %[p2],          3(%[dst])                      \n\t"  /* load odd 2 */
+
+        /* odd 1. pixel */
+        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"  /* even 1 */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "lbu              %[Temp1],       1(%[dst])                      \n\t"  /* load odd 1 */
+        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p3],          %[tp2]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tn2],         0(%[dst])                      \n\t"  /* load even 1 */
+
+        /* odd 2. pixel */
+        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"  /* even 2 */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"  /* odd 1 */
+        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t"  /* average even 1 */
+        "dpa.w.ph         $ac2,           %[p3],          %[filter45]    \n\t"
+        "extp             %[Temp4],       $ac2,           31             \n\t"
+
+        "lbu              %[tp1],         2(%[dst])                      \n\t"  /* load even 2 */
+        "sb               %[tn2],         0(%[dst])                      \n\t"  /* store even 1 */
+
+        /* clamp */
+        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t"  /* average odd 1 */
+        "lbux             %[p3],          %[Temp4](%[cm])                \n\t"  /* odd 2 */
+        "sb               %[Temp1],       1(%[dst])                      \n\t"  /* store odd 1 */
+
+        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t"  /* average even 2 */
+        "sb               %[tp1],         2(%[dst])                      \n\t"  /* store even 2 */
+
+        "addqh_r.w        %[p2],          %[p2],          %[p3]          \n\t"  /* average odd 2 */
+        "sb               %[p2],          3(%[dst])                      \n\t"  /* store odd 2 */
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+/* Horizontal bilinear (2-tap) convolution for 8-pixel-wide blocks,
+ * rounding-averaged with the pixels already in dst.  Produces the four
+ * even and four odd output pixels of one row per iteration; taps come
+ * from filter_x0[3..4] packed into one 32-bit word and results are
+ * clamped via vp9_ff_cropTbl.
+ * Fixes vs. original: stray double semicolon after filter45 removed,
+ * unused variable/asm operand n1 dropped (never referenced in the asm
+ * template), parameter list re-aligned. */
+static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
+                                          int32_t src_stride,
+                                          uint8_t *dst,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_x0,
+                                          int32_t h) {
+  int32_t y;
+  uint8_t *cm = vp9_ff_cropTbl;
+  uint32_t vector4a = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t tp1, tp2, tp3, tp4;
+  uint32_t p1, p2, p3, p4;
+  uint32_t st0, st1;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  /* Load taps filter_x0[3..4] as one 32-bit word (two packed int16s). */
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src + src_stride);
+    vp9_prefetch_load(src + src_stride + 32);
+    vp9_prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                      \n\t"
+        "ulw              %[tp2],         4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
+        "ulw              %[tp3],         8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+        "lbu              %[Temp2],       0(%[dst])                      \n\t"
+        "lbu              %[tp4],         2(%[dst])                      \n\t"
+
+        /* even 2. pixel */
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        /* even 3. pixel */
+        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a],    $ac1                           \n\t"
+        "mthi             $zero,          $ac1                           \n\t"
+        "lbux             %[st1],         %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,           %[p3],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac1,           31             \n\t"
+
+        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
+        "addqh_r.w        %[tp4],         %[tp4],         %[st1]         \n\t"
+        "sb               %[Temp2],       0(%[dst])                      \n\t"
+        "sb               %[tp4],         2(%[dst])                      \n\t"
+
+        /* even 4. pixel */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+
+        "balign           %[tp3],         %[tp2],         3              \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+
+        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
+        "lbu              %[Temp2],       4(%[dst])                      \n\t"
+        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
+
+        "dpa.w.ph         $ac2,           %[p4],          %[filter45]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        /* odd 1. pixel */
+        "mtlo             %[vector4a],    $ac1                           \n\t"
+        "mthi             $zero,          $ac1                           \n\t"
+        "sb               %[Temp2],       4(%[dst])                      \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp3]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp3]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tp1],         6(%[dst])                      \n\t"
+
+        /* odd 2. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "lbux             %[st0],         %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,           %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],       $ac1,           31             \n\t"
+
+        "lbu              %[tp2],         1(%[dst])                      \n\t"
+        "lbu              %[tp3],         3(%[dst])                      \n\t"
+        "addqh_r.w        %[tp1],         %[tp1],         %[st0]         \n\t"
+
+        /* odd 3. pixel */
+        "lbux             %[st1],         %[Temp2](%[cm])                \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[filter45]    \n\t"
+        "addqh_r.w        %[tp2],         %[tp2],         %[st1]         \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tp4],         5(%[dst])                      \n\t"
+
+        /* odd 4. pixel */
+        "sb               %[tp2],         1(%[dst])                      \n\t"
+        "sb               %[tp1],         6(%[dst])                      \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac2,           31             \n\t"
+
+        "lbu              %[tp1],         7(%[dst])                      \n\t"
+
+        /* clamp */
+        "lbux             %[p4],          %[Temp3](%[cm])                \n\t"
+        "addqh_r.w        %[tp3],         %[tp3],         %[p4]          \n\t"
+
+        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
+        "addqh_r.w        %[tp4],         %[tp4],         %[p2]          \n\t"
+
+        "lbux             %[p1],          %[Temp1](%[cm])                \n\t"
+        "addqh_r.w        %[tp1],         %[tp1],         %[p1]          \n\t"
+
+        /* store bytes */
+        "sb               %[tp3],         3(%[dst])                      \n\t"
+        "sb               %[tp4],         5(%[dst])                      \n\t"
+        "sb               %[tp1],         7(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
+          [st0] "=&r" (st0), [st1] "=&r" (st1),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
+                                          int32_t src_stride,
+                                          uint8_t *dst_ptr,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_x0,
+                                          int32_t h,
+                                          int32_t count) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vp9_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src_ptr + src_stride);
+    vp9_prefetch_load(src_ptr + src_stride + 32);
+    vp9_prefetch_store(dst_ptr + dst_stride);
+
+    for (c = 0; c < count; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
+          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
+          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
+          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
+          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
+          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
+          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
+          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
+          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                   \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
+          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
+          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
+          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
+          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
+          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
+          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
+          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
+          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
+
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
+
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
+
+          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
+          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
+          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
+/* Horizontal 2-tap ("bilinear") convolve-and-average for 64-pixel-wide
+ * blocks on MIPS DSP-ASE r2.
+ *
+ * For each of the h rows the row is processed as four 16-pixel chunks.
+ * Each chunk computes 8 even and 8 odd output pixels: the packed
+ * dpa.w.ph multiply-accumulate applies the two taps, the 64 seeded into
+ * each accumulator plus extp performs the rounding shift, lbux clamps
+ * through vp9_ff_cropTbl, and addqh_r.w rounds-averages the result with
+ * the pixel already in dst before it is stored back.
+ *
+ * filter_x0 is an 8-tap filter array of which only taps 3 and 4 are
+ * used; they are loaded as one 32-bit word (filter45) for the packed
+ * MACs.
+ */
+static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr,
+                                          int32_t src_stride,
+                                          uint8_t *dst_ptr,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_x0,
+                                          int32_t h) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vp9_ff_cropTbl;
+  uint32_t vector_64 = 64;  /* rounding bias seeded into each accumulator */
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  /* Pack taps 3 and 4 into a single word for dpa.w.ph.
+   * NOTE(review): the int16_t* -> int32_t* load assumes the tap pair is
+   * 4-byte aligned and matches native endianness -- same pattern as the
+   * other dspr2 convolve kernels, but technically strict-aliasing UB. */
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src_ptr + src_stride);
+    vp9_prefetch_load(src_ptr + src_stride + 32);
+    vp9_prefetch_load(src_ptr + src_stride + 64);
+    vp9_prefetch_store(dst_ptr + dst_stride);
+    vp9_prefetch_store(dst_ptr + dst_stride + 32);
+
+    /* 4 chunks of 16 pixels = 64-pixel row */
+    for (c = 0; c < 4; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 2 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 2 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 2 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
+          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
+          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
+          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
+          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
+          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
+          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
+          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
+          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                   \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
+          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
+          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
+          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
+          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
+          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
+          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
+          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
+          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
+
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
+
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
+
+          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
+          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
+          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
+/* Public entry point: horizontal 2-tap (bilinear) convolve-and-average.
+ *
+ * Handles only the unscaled case (x_step_q4 == 16) with the standard
+ * block widths 4/8/16/32/64 via the DSP-ASE r2 worker kernels; any
+ * other step or width falls back to the generic C implementation.
+ * filter_y/y_step_q4 are only forwarded to that C fallback.
+ */
+void vp9_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                   uint8_t *dst, ptrdiff_t dst_stride,
+                                   const int16_t *filter_x, int x_step_q4,
+                                   const int16_t *filter_y, int y_step_q4,
+                                   int w, int h) {
+  if (16 == x_step_q4) {
+    uint32_t pos = 38;
+
+    /* set the DSP-control bit position (38) that the workers' extp
+       instructions use to extract the result from the accumulator */
+    __asm__ __volatile__ (
+      "wrdsp      %[pos],     1           \n\t"
+      :
+      : [pos] "r" (pos)
+    );
+
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src);
+    vp9_prefetch_load(src + 32);
+    vp9_prefetch_store(dst);
+
+    switch (w) {
+      case 4:
+        convolve_bi_avg_horiz_4_dspr2(src, src_stride,
+                                     dst, dst_stride,
+                                     filter_x, h);
+        break;
+      case 8:
+        convolve_bi_avg_horiz_8_dspr2(src, src_stride,
+                                     dst, dst_stride,
+                                     filter_x, h);
+        break;
+      case 16:
+        convolve_bi_avg_horiz_16_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_x, h, 1);
+        break;
+      case 32:
+        /* w == 32 reuses the 16-wide worker with count == 2 */
+        convolve_bi_avg_horiz_16_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_x, h, 2);
+        break;
+      case 64:
+        vp9_prefetch_load(src + 64);
+        vp9_prefetch_store(dst + 32);
+
+        convolve_bi_avg_horiz_64_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_x, h);
+        break;
+      default:
+        vp9_convolve8_avg_horiz_c(src, src_stride,
+                                  dst, dst_stride,
+                                  filter_x, x_step_q4,
+                                  filter_y, y_step_q4,
+                                  w, h);
+        break;
+    }
+  } else {
+    vp9_convolve8_avg_horiz_c(src, src_stride,
+                              dst, dst_stride,
+                              filter_x, x_step_q4,
+                              filter_y, y_step_q4,
+                              w, h);
+  }
+}
+#endif
diff --git a/vp9/common/mips/dspr2/vp9_convolve2_dspr2.c b/vp9/common/mips/dspr2/vp9_convolve2_dspr2.c
new file mode 100644 (file)
index 0000000..bc422bc
--- /dev/null
@@ -0,0 +1,784 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_convolve.h"
+#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
+
+#if HAVE_DSPR2
+/* 2-tap (bilinear) horizontal filter over a 4-pixel-wide strip, h rows.\r
+ * The output is written TRANSPOSED: the 4 filtered pixels of one input row\r
+ * are stored down a destination column (stepping by dst_stride per pixel),\r
+ * and each new input row advances the destination by one byte ("dst += 1"\r
+ * at the bottom of the loop). */\r
+static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src,\r
+                                                 int32_t src_stride,\r
+                                                 uint8_t *dst,\r
+                                                 int32_t dst_stride,\r
+                                                 const int16_t *filter_x0,\r
+                                                 int32_t h) {\r
+  int32_t       y;\r
+  /* clamp lookup table; the "lbux ... (%[cm])" instructions below use it to\r
+     saturate each filtered sum to an 8-bit pixel value */\r
+  uint8_t       *cm = vp9_ff_cropTbl;\r
+  uint8_t       *dst_ptr;\r
+  int32_t       Temp1, Temp2;\r
+  uint32_t      vector4a = 64;  /* rounding constant preloaded into the accumulators */\r
+  uint32_t      tp1, tp2;\r
+  uint32_t      p1, p2;\r
+  const int16_t *filter = &filter_x0[3];\r
+  uint32_t      filter45;\r
+\r
+  /* Pack the two centre taps (filter_x0[3], filter_x0[4]) into one 32-bit\r
+   * word so a single dpa.w.ph applies both at once.\r
+   * NOTE(review): this is a 16-bit -> 32-bit aliasing read; presumed benign\r
+   * here as in the rest of the dspr2 code -- confirm against project build\r
+   * settings (strict aliasing). */\r
+  filter45 = ((const int32_t *)filter)[0];\r
+\r
+  for (y = h; y--;) {\r
+    dst_ptr = dst;\r
+    /* prefetch data to cache memory */\r
+    vp9_prefetch_load(src + src_stride);\r
+    vp9_prefetch_load(src + src_stride + 32);\r
+\r
+    __asm__ __volatile__ (\r
+        "ulw              %[tp1],         0(%[src])                      \n\t"\r
+        "ulw              %[tp2],         4(%[src])                      \n\t"\r
+\r
+        /* even 1. pixel */\r
+        "mtlo             %[vector4a],    $ac3                           \n\t"\r
+        "mthi             $zero,          $ac3                           \n\t"\r
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"\r
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"\r
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"\r
+        "extp             %[Temp1],       $ac3,           31             \n\t"\r
+\r
+        /* even 2. pixel */\r
+        "mtlo             %[vector4a],    $ac2                           \n\t"\r
+        "mthi             $zero,          $ac2                           \n\t"\r
+        "balign           %[tp2],         %[tp1],         3              \n\t"\r
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"\r
+        "extp             %[Temp2],       $ac2,           31             \n\t"\r
+\r
+        /* odd 1. pixel */\r
+        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"\r
+        "mtlo             %[vector4a],    $ac3                           \n\t"\r
+        "mthi             $zero,          $ac3                           \n\t"\r
+        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"\r
+        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"\r
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"\r
+        "extp             %[Temp1],       $ac3,           31             \n\t"\r
+\r
+        /* odd 2. pixel */\r
+        "lbux             %[tp2],         %[Temp2](%[cm])                \n\t"\r
+        "mtlo             %[vector4a],    $ac2                           \n\t"\r
+        "mthi             $zero,          $ac2                           \n\t"\r
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"\r
+        "extp             %[Temp2],       $ac2,           31             \n\t"\r
+\r
+        /* clamp */\r
+        "lbux             %[p1],          %[Temp1](%[cm])                \n\t"\r
+        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"\r
+\r
+        /* store bytes -- transposed: successive pixels go DOWN the dst column */\r
+        "sb               %[tp1],         0(%[dst_ptr])                  \n\t"\r
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"\r
+\r
+        "sb               %[p1],          0(%[dst_ptr])                  \n\t"\r
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"\r
+\r
+        "sb               %[tp2],         0(%[dst_ptr])                  \n\t"\r
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"\r
+\r
+        "sb               %[p2],          0(%[dst_ptr])                  \n\t"\r
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"\r
+\r
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),\r
+          [p1] "=&r" (p1), [p2] "=&r" (p2),\r
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),\r
+          [dst_ptr] "+r" (dst_ptr)\r
+        : [filter45] "r" (filter45),[vector4a] "r" (vector4a),\r
+          [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride)\r
+    );\r
+\r
+    /* Next row... */\r
+    src += src_stride;\r
+    dst += 1;\r
+  }\r
+}
+
+/* 2-tap (bilinear) horizontal filter over an 8-pixel-wide strip, h rows,\r
+ * written transposed (pixels go down a dst column, rows advance dst by one\r
+ * byte). Even-numbered output pixels are stored through dst_ptr and\r
+ * odd-numbered ones through odd_dst (= dst_ptr + dst_stride); both step by\r
+ * dst_pitch_2 = 2 * dst_stride, interleaving into one column. */\r
+static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,\r
+                                                 int32_t src_stride,\r
+                                                 uint8_t *dst,\r
+                                                 int32_t dst_stride,\r
+                                                 const int16_t *filter_x0,\r
+                                                 int32_t h) {\r
+  int32_t y;\r
+  /* clamp lookup table used by the lbux instructions to saturate to 0..255 */\r
+  uint8_t *cm = vp9_ff_cropTbl;\r
+  uint8_t *dst_ptr;\r
+  uint32_t vector4a = 64;  /* rounding constant preloaded into the accumulators */\r
+  int32_t Temp1, Temp2, Temp3;\r
+  uint32_t tp1, tp2, tp3;\r
+  uint32_t p1, p2, p3, p4;\r
+  uint8_t *odd_dst;\r
+  uint32_t dst_pitch_2 = (dst_stride << 1);\r
+  const int16_t *filter = &filter_x0[3];\r
+  uint32_t      filter45;\r
+\r
+  /* Pack the two centre taps (filter_x0[3], filter_x0[4]) into one 32-bit\r
+   * word for dpa.w.ph. NOTE(review): 16-bit -> 32-bit aliasing read;\r
+   * presumed benign as elsewhere in the dspr2 code -- confirm. */\r
+  filter45 = ((const int32_t *)filter)[0];\r
+\r
+  for (y = h; y--;) {\r
+    /* prefetch data to cache memory */\r
+    vp9_prefetch_load(src + src_stride);\r
+    vp9_prefetch_load(src + src_stride + 32);\r
+\r
+    dst_ptr = dst;\r
+    odd_dst = (dst_ptr + dst_stride);\r
+\r
+    __asm__ __volatile__ (\r
+        "ulw              %[tp1],         0(%[src])                       \n\t"\r
+        "ulw              %[tp2],         4(%[src])                       \n\t"\r
+\r
+        /* even 1. pixel */\r
+        "mtlo             %[vector4a],    $ac3                            \n\t"\r
+        "mthi             $zero,          $ac3                            \n\t"\r
+        "mtlo             %[vector4a],    $ac2                            \n\t"\r
+        "mthi             $zero,          $ac2                            \n\t"\r
+        "preceu.ph.qbr    %[p1],          %[tp1]                          \n\t"\r
+        "preceu.ph.qbl    %[p2],          %[tp1]                          \n\t"\r
+        "preceu.ph.qbr    %[p3],          %[tp2]                          \n\t"\r
+        "preceu.ph.qbl    %[p4],          %[tp2]                          \n\t"\r
+        "ulw              %[tp3],         8(%[src])                       \n\t"\r
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]     \n\t"\r
+        "extp             %[Temp1],       $ac3,           31              \n\t"\r
+\r
+        /* even 2. pixel */\r
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]     \n\t"\r
+        "extp             %[Temp3],       $ac2,           31              \n\t"\r
+\r
+        /* even 3. pixel */\r
+        "lbux             %[Temp2],       %[Temp1](%[cm])                 \n\t"\r
+        "mtlo             %[vector4a],    $ac1                            \n\t"\r
+        "mthi             $zero,          $ac1                            \n\t"\r
+        "balign           %[tp3],         %[tp2],         3              \n\t"\r
+        "balign           %[tp2],         %[tp1],         3              \n\t"\r
+        "dpa.w.ph         $ac1,           %[p3],          %[filter45]     \n\t"\r
+        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"\r
+        "extp             %[p3],          $ac1,           31              \n\t"\r
+\r
+        /* even 4. pixel */\r
+        "mtlo             %[vector4a],    $ac2                            \n\t"\r
+        "mthi             $zero,          $ac2                            \n\t"\r
+        "mtlo             %[vector4a],    $ac3                            \n\t"\r
+        "mthi             $zero,          $ac3                            \n\t"\r
+        "sb               %[Temp2],       0(%[dst_ptr])                   \n\t"\r
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"\r
+        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"\r
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"\r
+\r
+        "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"\r
+        "extp             %[Temp3],       $ac2,           31              \n\t"\r
+\r
+        "lbux             %[Temp1],         %[p3](%[cm])                    \n\t"\r
+\r
+        /* odd 1. pixel */\r
+        "mtlo             %[vector4a],    $ac1                            \n\t"\r
+        "mthi             $zero,          $ac1                            \n\t"\r
+        "preceu.ph.qbr    %[p1],          %[tp2]                          \n\t"\r
+        "preceu.ph.qbl    %[p2],          %[tp2]                          \n\t"\r
+        "preceu.ph.qbr    %[p3],          %[tp3]                          \n\t"\r
+        "preceu.ph.qbl    %[p4],          %[tp3]                          \n\t"\r
+        "sb               %[Temp1],       0(%[dst_ptr])                   \n\t"\r
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"\r
+\r
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]     \n\t"\r
+        "extp             %[Temp2],       $ac3,           31              \n\t"\r
+\r
+        /* odd 2. pixel */\r
+        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"\r
+        "mtlo             %[vector4a],    $ac3                            \n\t"\r
+        "mthi             $zero,          $ac3                            \n\t"\r
+        "mtlo             %[vector4a],    $ac2                            \n\t"\r
+        "mthi             $zero,          $ac2                            \n\t"\r
+        "dpa.w.ph         $ac1,           %[p2],          %[filter45]     \n\t"\r
+        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"\r
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"\r
+        "extp             %[Temp3],       $ac1,           31              \n\t"\r
+\r
+        /* odd 3. pixel */\r
+        "lbux             %[tp3],         %[Temp2](%[cm])                 \n\t"\r
+        "dpa.w.ph         $ac3,           %[p3],          %[filter45]     \n\t"\r
+        "extp             %[Temp2],       $ac3,           31              \n\t"\r
+\r
+        /* odd 4. pixel */\r
+        "sb               %[tp3],         0(%[odd_dst])                   \n\t"\r
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+        "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"\r
+        "extp             %[Temp1],       $ac2,           31              \n\t"\r
+\r
+        /* clamp */\r
+        "lbux             %[p4],          %[Temp3](%[cm])                 \n\t"\r
+        "lbux             %[p2],          %[Temp2](%[cm])                 \n\t"\r
+        "lbux             %[p1],          %[Temp1](%[cm])                 \n\t"\r
+\r
+        /* store bytes */\r
+        "sb               %[p4],          0(%[odd_dst])                   \n\t"\r
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+\r
+        "sb               %[p2],          0(%[odd_dst])                   \n\t"\r
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+\r
+        "sb               %[p1],          0(%[odd_dst])                   \n\t"\r
+\r
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),\r
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),\r
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),\r
+          [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst)\r
+        : [filter45] "r" (filter45),[vector4a] "r" (vector4a), [cm] "r" (cm),\r
+          [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)\r
+    );\r
+\r
+    /* Next row... */\r
+    src += src_stride;\r
+    dst += 1;\r
+  }\r
+}
+
+/* 2-tap (bilinear) horizontal filter over (count * 16) pixels per row,\r
+ * h rows, written transposed: pixels of one input row go down a dst\r
+ * column, rows advance dst_ptr by one byte. Inside each 16-pixel group the\r
+ * eight even outputs are stored via dst and the eight odd outputs via\r
+ * odd_dst (= dst + dst_stride), both stepping by dst_pitch_2 so they\r
+ * interleave. Software-pipelined across accumulators $ac1..$ac3. */\r
+static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr,\r
+                                                  int32_t src_stride,\r
+                                                  uint8_t *dst_ptr,\r
+                                                  int32_t dst_stride,\r
+                                                  const int16_t *filter_x0,\r
+                                                  int32_t h,\r
+                                                  int32_t count) {\r
+  int32_t       c, y;\r
+  const uint8_t *src;\r
+  uint8_t       *dst;\r
+  /* clamp lookup table used by the lbux instructions to saturate to 0..255 */\r
+  uint8_t       *cm = vp9_ff_cropTbl;\r
+  uint32_t      vector_64 = 64;  /* rounding constant preloaded into the accumulators */\r
+  int32_t       Temp1, Temp2, Temp3;\r
+  uint32_t      qload1, qload2;\r
+  uint32_t      p1, p2, p3, p4, p5;\r
+  uint32_t      st1, st2, st3;\r
+  uint32_t      dst_pitch_2 = (dst_stride << 1);\r
+  uint8_t       *odd_dst;\r
+  const int16_t *filter = &filter_x0[3];\r
+  uint32_t      filter45;\r
+\r
+  /* Pack the two centre taps (filter_x0[3], filter_x0[4]) into one 32-bit\r
+   * word for dpa.w.ph. NOTE(review): 16-bit -> 32-bit aliasing read;\r
+   * presumed benign as elsewhere in the dspr2 code -- confirm. */\r
+  filter45 = ((const int32_t *)filter)[0];\r
+\r
+  for (y = h; y--;) {\r
+    /* prefetch data to cache memory */\r
+    vp9_prefetch_load(src_ptr + src_stride);\r
+    vp9_prefetch_load(src_ptr + src_stride + 32);\r
+\r
+    src = src_ptr;\r
+    dst = dst_ptr;\r
+\r
+    odd_dst = (dst + dst_stride);\r
+\r
+    for (c = 0; c < count; c++) {\r
+      __asm__ __volatile__ (\r
+          "ulw              %[qload1],        0(%[src])                       \n\t"\r
+          "ulw              %[qload2],        4(%[src])                       \n\t"\r
+\r
+          /* even 1. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"\r
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"\r
+          "ulw              %[qload1],        8(%[src])                       \n\t"\r
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* even 1 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */\r
+\r
+          /* even 2. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbl    %[p5],            %[qload1]                       \n\t"\r
+          "ulw              %[qload2],        12(%[src])                      \n\t"\r
+          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     \n\t" /* even 1 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */\r
+\r
+          /* even 3. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "preceu.ph.qbr    %[p2],            %[qload2]                       \n\t"\r
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"\r
+          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     \n\t" /* even 3 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */\r
+\r
+          /* even 4. pixel */\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "preceu.ph.qbl    %[p3],            %[qload2]                       \n\t"\r
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     \n\t" /* even 4 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */\r
+\r
+          /* even 5. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     \n\t" /* even 5 */\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */\r
+\r
+          /* even 6. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "ulw              %[qload1],        20(%[src])                      \n\t"\r
+          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     \n\t" /* even 6 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */\r
+\r
+          /* even 7. pixel */\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"\r
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* even 7 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */\r
+\r
+          /* even 8. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* even 8 */\r
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */\r
+\r
+          /* ODD pixels: reload the same window shifted by one source byte */\r
+          "ulw              %[qload1],        1(%[src])                       \n\t"\r
+          "ulw              %[qload2],        5(%[src])                       \n\t"\r
+\r
+          /* odd 1. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"\r
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"\r
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "ulw              %[qload2],        9(%[src])                       \n\t"\r
+          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     \n\t" /* odd 1 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */\r
+\r
+          /* odd 2. pixel */\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"\r
+          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"\r
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */\r
+          "ulw              %[qload1],        13(%[src])                      \n\t"\r
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* odd 2 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */\r
+\r
+          /* odd 3. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"\r
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* odd 3 */\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */\r
+\r
+          /* odd 4. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"\r
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     \n\t" /* odd 4 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */\r
+\r
+          /* odd 5. pixel */\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* odd 5 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */\r
+\r
+          /* odd 6. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "ulw              %[qload1],        21(%[src])                      \n\t"\r
+          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     \n\t" /* odd 6 */\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */\r
+\r
+          /* odd 7. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"\r
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     \n\t" /* odd 7 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */\r
+\r
+          /* odd 8. pixel */\r
+          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     \n\t" /* odd 8 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */\r
+\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */\r
+\r
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+\r
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+\r
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */\r
+\r
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),\r
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),\r
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),\r
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),\r
+            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)\r
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),\r
+            [cm] "r" (cm),\r
+            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)\r
+      );\r
+\r
+      src += 16;\r
+      /* next 16-pixel group of this row: output is transposed, so the\r
+         destination moves 16 * dst_stride down, not 16 bytes across */\r
+      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));\r
+      odd_dst = (dst + dst_stride);\r
+    }\r
+\r
+    /* Next row... */\r
+    src_ptr += src_stride;\r
+    dst_ptr += 1;\r
+  }\r
+}\r
+
+static void convolve_bi_horiz_64_transposed_dspr2(const uint8_t *src_ptr,\r
+                                                  int32_t src_stride,\r
+                                                  uint8_t *dst_ptr,\r
+                                                  int32_t dst_stride,\r
+                                                  const int16_t *filter_x0,\r
+                                                  int32_t h) {\r
+  int32_t       c, y;\r
+  const uint8_t *src;\r
+  uint8_t       *dst;\r
+  uint8_t       *cm = vp9_ff_cropTbl;\r
+  uint32_t      vector_64 = 64;\r
+  int32_t       Temp1, Temp2, Temp3;\r
+  uint32_t      qload1, qload2;\r
+  uint32_t      p1, p2, p3, p4, p5;\r
+  uint32_t      st1, st2, st3;\r
+  uint32_t      dst_pitch_2 = (dst_stride << 1);\r
+  uint8_t       *odd_dst;\r
+  const int16_t *filter = &filter_x0[3];\r
+  uint32_t      filter45;\r
+\r
+  filter45 = ((const int32_t *)filter)[0];\r
+\r
+  for (y = h; y--;) {\r
+    /* prefetch data to cache memory */\r
+    vp9_prefetch_load(src_ptr + src_stride);\r
+    vp9_prefetch_load(src_ptr + src_stride + 32);\r
+    vp9_prefetch_load(src_ptr + src_stride + 64);\r
+\r
+    src = src_ptr;\r
+    dst = dst_ptr;\r
+\r
+    odd_dst = (dst + dst_stride);\r
+\r
+    for (c = 0; c < 4; c++) {\r
+      __asm__ __volatile__ (\r
+          "ulw              %[qload1],        0(%[src])                       \n\t"\r
+          "ulw              %[qload2],        4(%[src])                       \n\t"\r
+\r
+          /* even 1. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"\r
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"\r
+          "ulw              %[qload1],        8(%[src])                       \n\t"\r
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* even 1 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */\r
+\r
+          /* even 2. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbl    %[p5],            %[qload1]                       \n\t"\r
+          "ulw              %[qload2],        12(%[src])                      \n\t"\r
+          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     \n\t" /* even 1 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */\r
+\r
+          /* even 3. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "preceu.ph.qbr    %[p2],            %[qload2]                       \n\t"\r
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"\r
+          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     \n\t" /* even 3 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */\r
+\r
+          /* even 4. pixel */\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "preceu.ph.qbl    %[p3],            %[qload2]                       \n\t"\r
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     \n\t" /* even 4 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */\r
+\r
+          /* even 5. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     \n\t" /* even 5 */\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */\r
+\r
+          /* even 6. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "ulw              %[qload1],        20(%[src])                      \n\t"\r
+          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     \n\t" /* even 6 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */\r
+\r
+          /* even 7. pixel */\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"\r
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* even 7 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */\r
+\r
+          /* even 8. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* even 8 */\r
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */\r
+\r
+          /* ODD pixels */\r
+          "ulw              %[qload1],        1(%[src])                       \n\t"\r
+          "ulw              %[qload2],        5(%[src])                       \n\t"\r
+\r
+          /* odd 1. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"\r
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"\r
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"\r
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */\r
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"\r
+          "ulw              %[qload2],        9(%[src])                       \n\t"\r
+          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     \n\t" /* odd 1 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */\r
+\r
+          /* odd 2. pixel */\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"\r
+          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"\r
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */\r
+          "ulw              %[qload1],        13(%[src])                      \n\t"\r
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* odd 2 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */\r
+\r
+          /* odd 3. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"\r
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* odd 3 */\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */\r
+\r
+          /* odd 4. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"\r
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     \n\t" /* odd 4 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */\r
+\r
+          /* odd 5. pixel */\r
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */\r
+          "mthi             $zero,            $ac2                            \n\t"\r
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* odd 5 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */\r
+\r
+          /* odd 6. pixel */\r
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */\r
+          "mthi             $zero,            $ac3                            \n\t"\r
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "ulw              %[qload1],        21(%[src])                      \n\t"\r
+          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     \n\t" /* odd 6 */\r
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */\r
+\r
+          /* odd 7. pixel */\r
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */\r
+          "mthi             $zero,            $ac1                            \n\t"\r
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"\r
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     \n\t" /* odd 7 */\r
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */\r
+\r
+          /* odd 8. pixel */\r
+          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     \n\t" /* odd 8 */\r
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */\r
+\r
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */\r
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */\r
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */\r
+\r
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+\r
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */\r
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"\r
+\r
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */\r
+\r
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),\r
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),\r
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),\r
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),\r
+            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)\r
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),\r
+            [cm] "r" (cm),\r
+            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)\r
+      );\r
+\r
+      src += 16;\r
+      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));\r
+      odd_dst = (dst + dst_stride);\r
+    }\r
+\r
+    /* Next row... */\r
+    src_ptr += src_stride;\r
+    dst_ptr += 1;\r
+  }\r
+}\r
+
+void convolve_bi_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
+                                  uint8_t *dst, ptrdiff_t dst_stride,
+                                  const int16_t *filter, int w, int h) {
+  int x, y;
+
+  /* Plain-C fallback: 2-tap (bilinear) horizontal filter that writes its
+     output transposed — each input row becomes an output column (dst
+     advances by dst_stride per pixel within a row, by 1 per row). */
+  for (y = 0; y < h; ++y) {
+    for (x = 0; x < w; ++x) {
+      /* The two bilinear taps sit at positions 3 and 4 of the 8-tap
+         filter array. */
+      const int sum = src[x] * filter[3] + src[x + 1] * filter[4];
+
+      dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+    }
+
+    src += src_stride;
+    dst += 1;
+  }
+}
+
+/* Transposed 2-tap (bilinear) horizontal convolution, DSPr2-accelerated.
+ * Dispatches to a width-specialized assembly routine for the common block
+ * widths; any other width falls back to the plain-C transposed filter.
+ * Output is written transposed relative to the input. */
+void vp9_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                         uint8_t *dst, ptrdiff_t dst_stride,
+                         const int16_t *filter,
+                         int w, int h) {
+  uint32_t pos = 38;
+
+  /* Set the bit position used by the "extp" accumulator-extract
+     instructions in the width-specialized routines below. */
+  __asm__ __volatile__ (
+    "wrdsp      %[pos],     1           \n\t"
+    :
+    : [pos] "r" (pos)
+  );
+
+  /* prefetch data to cache memory */
+  vp9_prefetch_load(src);
+  vp9_prefetch_load(src + 32);
+
+  switch (w) {
+    case 4:
+      convolve_bi_horiz_4_transposed_dspr2(src, src_stride,
+                                           dst, dst_stride,
+                                           filter, h);
+      break;
+    case 8:
+      convolve_bi_horiz_8_transposed_dspr2(src, src_stride,
+                                           dst, dst_stride,
+                                           filter, h);
+      break;
+    case 16:
+    case 32:
+      /* Same 16-wide routine, iterated (w / 16) times per row. */
+      convolve_bi_horiz_16_transposed_dspr2(src, src_stride,
+                                            dst, dst_stride,
+                                            filter, h,
+                                            (w/16));
+      break;
+    case 64:
+      vp9_prefetch_load(src + 32);
+      convolve_bi_horiz_64_transposed_dspr2(src, src_stride,
+                                            dst, dst_stride,
+                                            filter, h);
+      break;
+    default:
+      /* Unusual widths: scalar C fallback. */
+      convolve_bi_horiz_transposed(src, src_stride,
+                                   dst, dst_stride,
+                                   filter, w, h);
+      break;
+  }
+}
+#endif
diff --git a/vp9/common/mips/dspr2/vp9_convolve2_horiz_dspr2.c b/vp9/common/mips/dspr2/vp9_convolve2_horiz_dspr2.c
new file mode 100644 (file)
index 0000000..1debdb4
--- /dev/null
@@ -0,0 +1,713 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_convolve.h"
+#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
+
+#if HAVE_DSPR2
+/* 2-tap (bilinear) horizontal filter for 4-pixel-wide blocks, MIPS DSPr2.
+ * Processes one row of 4 output pixels per iteration: two "even" and two
+ * "odd" pixels are accumulated in the $ac2/$ac3 DSP accumulators, rounded
+ * via extp (bit position programmed by the caller's wrdsp), and clamped
+ * through the vp9_ff_cropTbl lookup table. */
+static void convolve_bi_horiz_4_dspr2(const uint8_t *src,
+                                      int32_t src_stride,
+                                      uint8_t *dst,
+                                      int32_t dst_stride,
+                                      const int16_t *filter_x0,
+                                      int32_t h) {
+  int32_t y;
+  uint8_t *cm = vp9_ff_cropTbl;
+  int32_t Temp1, Temp2, Temp3, Temp4;
+  uint32_t vector4a = 64;  /* rounding constant preloaded into acc lo */
+  uint32_t tp1, tp2;
+  uint32_t p1, p2;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;  /* fixed: stray ';;' removed */
+
+  /* Pack both 16-bit taps (positions 3 and 4) into one 32-bit operand for
+     dpa.w.ph.  NOTE(review): this type-puns int16_t[2] as int32_t and
+     assumes suitable alignment/endianness — pre-existing idiom in this
+     file, flagged but not changed. */
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src + src_stride);
+    vp9_prefetch_load(src + src_stride + 32);
+    vp9_prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],      0(%[src])                      \n\t"
+        "ulw              %[tp2],      4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],    $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "balign           %[tp2],      %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        /* odd 1. pixel */
+        "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 2. pixel */
+        "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp4],    $ac2,           31             \n\t"
+
+        /* clamp */
+        "lbux             %[p1],       %[Temp2](%[cm])                \n\t"
+        "lbux             %[p2],       %[Temp4](%[cm])                \n\t"
+
+        /* store bytes */
+        "sb               %[tp1],      0(%[dst])                      \n\t"
+        "sb               %[p1],       1(%[dst])                      \n\t"
+        "sb               %[tp2],      2(%[dst])                      \n\t"
+        "sb               %[p2],       3(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [p1] "=&r" (p1), [p2] "=&r" (p2),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+/* 2-tap (bilinear) horizontal filter for 8-pixel-wide blocks, MIPS DSPr2.
+ * Per row, computes 4 even then 4 odd output pixels using the $ac1/$ac2/$ac3
+ * accumulators, rounds via extp (bit position programmed by the caller's
+ * wrdsp) and clamps through the vp9_ff_cropTbl lookup table. */
+static void convolve_bi_horiz_8_dspr2(const uint8_t *src,
+                                      int32_t src_stride,
+                                      uint8_t *dst,
+                                      int32_t dst_stride,
+                                      const int16_t *filter_x0,
+                                      int32_t h) {
+  int32_t y;
+  uint8_t *cm = vp9_ff_cropTbl;
+  uint32_t vector4a = 64;  /* rounding constant preloaded into acc lo */
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t tp1, tp2, tp3;
+  uint32_t p1, p2, p3, p4;
+  uint32_t st0, st1;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;  /* fixed: stray ';;' removed */
+
+  /* Pack both 16-bit taps (positions 3 and 4) into one 32-bit operand for
+     dpa.w.ph.  NOTE(review): this type-puns int16_t[2] as int32_t and
+     assumes suitable alignment/endianness — pre-existing idiom in this
+     file, flagged but not changed. */
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src + src_stride);
+    vp9_prefetch_load(src + src_stride + 32);
+    vp9_prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],      0(%[src])                      \n\t"
+        "ulw              %[tp2],      4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
+        "ulw              %[tp3],      8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],    $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        /* even 3. pixel */
+        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac1                           \n\t"
+        "mthi             $zero,       $ac1                           \n\t"
+        "dpa.w.ph         $ac1,        %[p3],          %[filter45]    \n\t"
+        "extp             %[Temp1],    $ac1,           31             \n\t"
+
+        /* even 4. pixel */
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "sb               %[st0],      0(%[dst])                      \n\t"
+        "lbux             %[st1],      %[Temp3](%[cm])                \n\t"
+
+        "balign           %[tp3],      %[tp2],         3              \n\t"
+        "balign           %[tp2],      %[tp1],         3              \n\t"
+
+        "dpa.w.ph         $ac2,        %[p4],          %[filter45]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
+
+        /* odd 1. pixel */
+        "mtlo             %[vector4a], $ac1                           \n\t"
+        "mthi             $zero,       $ac1                           \n\t"
+        "sb               %[st1],      2(%[dst])                      \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[p3],       %[tp3]                         \n\t"
+        "preceu.ph.qbl    %[p4],       %[tp3]                         \n\t"
+        "sb               %[st0],      4(%[dst])                      \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 2. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "lbux             %[st0],      %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,        %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],    $ac1,           31             \n\t"
+
+        /* odd 3. pixel */
+        "lbux             %[st1],      %[Temp2](%[cm])                \n\t"
+        "dpa.w.ph         $ac3,        %[p3],          %[filter45]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 4. pixel */
+        "sb               %[st1],      1(%[dst])                      \n\t"
+        "sb               %[st0],      6(%[dst])                      \n\t"
+        "dpa.w.ph         $ac2,        %[p4],          %[filter45]    \n\t"
+        "extp             %[Temp1],    $ac2,           31             \n\t"
+
+        /* clamp */
+        "lbux             %[p4],       %[Temp3](%[cm])                \n\t"
+        "lbux             %[p2],       %[Temp2](%[cm])                \n\t"
+        "lbux             %[p1],       %[Temp1](%[cm])                \n\t"
+
+        /* store bytes */
+        "sb               %[p4],       3(%[dst])                      \n\t"
+        "sb               %[p2],       5(%[dst])                      \n\t"
+        "sb               %[p1],       7(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
+          [st0] "=&r" (st0), [st1] "=&r" (st1),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
+                                       int32_t src_stride,
+                                       uint8_t *dst_ptr,
+                                       int32_t dst_stride,
+                                       const int16_t *filter_x0,
+                                       int32_t h,
+                                       int32_t count) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vp9_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src_ptr + src_stride);
+    vp9_prefetch_load(src_ptr + src_stride + 32);
+    vp9_prefetch_store(dst_ptr + dst_stride);
+
+    for (c = 0; c < count; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
+          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                    \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+
+          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
+          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
+          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
+static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
+                                       int32_t src_stride,
+                                       uint8_t *dst_ptr,
+                                       int32_t dst_stride,
+                                       const int16_t *filter_x0,
+                                       int32_t h) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vp9_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    vp9_prefetch_load(src_ptr + src_stride);
+    vp9_prefetch_load(src_ptr + src_stride + 32);
+    vp9_prefetch_load(src_ptr + src_stride + 64);
+    vp9_prefetch_store(dst_ptr + dst_stride);
+    vp9_prefetch_store(dst_ptr + dst_stride + 32);
+
+    for (c = 0; c < 4; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
+          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                    \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+
+          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
+          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
+          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
/* Dispatch the 2-tap horizontal convolution to the width-specialized
 * dspr2 kernels.  Only the unscaled case (x_step_q4 == 16) has an
 * optimized path; anything else falls back to the C reference.
 */
void vp9_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4,
                               int w, int h) {
  uint32_t pos = 38;

  if (x_step_q4 != 16) {
    /* Scaled prediction: no dspr2 fast path. */
    vp9_convolve8_horiz_c(src, src_stride,
                          dst, dst_stride,
                          filter_x, x_step_q4,
                          filter_y, y_step_q4,
                          w, h);
    return;
  }

  vp9_prefetch_load((const uint8_t *)filter_x);

  /* bit position for extract from acc */
  __asm__ __volatile__ (
    "wrdsp      %[pos],     1           \n\t"
    :
    : [pos] "r" (pos)
  );

  /* prefetch data to cache memory */
  vp9_prefetch_load(src);
  vp9_prefetch_load(src + 32);
  vp9_prefetch_store(dst);

  if (w == 4) {
    convolve_bi_horiz_4_dspr2(src, (int32_t)src_stride,
                              dst, (int32_t)dst_stride,
                              filter_x, (int32_t)h);
  } else if (w == 8) {
    convolve_bi_horiz_8_dspr2(src, (int32_t)src_stride,
                              dst, (int32_t)dst_stride,
                              filter_x, (int32_t)h);
  } else if (w == 16) {
    convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride,
                               dst, (int32_t)dst_stride,
                               filter_x, (int32_t)h, 1);
  } else if (w == 32) {
    /* 32-wide rows reuse the 16-wide kernel with two passes per row. */
    convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride,
                               dst, (int32_t)dst_stride,
                               filter_x, (int32_t)h, 2);
  } else if (w == 64) {
    vp9_prefetch_load(src + 64);
    vp9_prefetch_store(dst + 32);

    convolve_bi_horiz_64_dspr2(src, (int32_t)src_stride,
                               dst, (int32_t)dst_stride,
                               filter_x, (int32_t)h);
  } else {
    vp9_convolve8_horiz_c(src, src_stride,
                          dst, dst_stride,
                          filter_x, x_step_q4,
                          filter_y, y_step_q4,
                          w, h);
  }
}
+#endif
diff --git a/vp9/common/mips/dspr2/vp9_convolve2_vert_dspr2.c b/vp9/common/mips/dspr2/vp9_convolve2_vert_dspr2.c
new file mode 100644 (file)
index 0000000..8eb105c
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_config.h"
+#include "./vp9_rtcd.h"
+#include "vp9/common/vp9_common.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+#include "vp9/common/vp9_convolve.h"
+#include "vp9/common/mips/dspr2/vp9_common_dspr2.h"
+
+#if HAVE_DSPR2
/* Bilinear (2-tap) vertical convolution for widths that are a multiple
 * of 4 (used here for w = 4/8/16/32).
 *
 * For each row, 4 output pixels are produced per inline-asm iteration:
 * two vertically adjacent source rows are loaded, their bytes expanded
 * to halfwords, paired per column, and accumulated with dpa.w.ph against
 * the packed 2-tap filter.  The 64 bias seeded into each accumulator's
 * lo word provides rounding; extp at bit 31 extracts the result, which
 * is clamped through the vp9_ff_cropTbl lookup (lbux).
 */
static void convolve_bi_vert_4_dspr2(const uint8_t *src,
                                     int32_t src_stride,
                                     uint8_t *dst,
                                     int32_t dst_stride,
                                     const int16_t *filter_y,
                                     int32_t w,
                                     int32_t h) {
  int32_t       x, y;
  const uint8_t *src_ptr;
  uint8_t       *dst_ptr;
  uint8_t       *cm = vp9_ff_cropTbl;
  uint32_t      vector4a = 64;   /* rounding bias for the accumulators */
  uint32_t      load1, load2;
  uint32_t      p1, p2;
  uint32_t      scratch1;
  uint32_t      store1, store2;
  int32_t       Temp1, Temp2;
  const int16_t *filter = &filter_y[3];
  uint32_t      filter45;

  /* Pack taps 3 and 4 (the non-zero bilinear taps) into one word.
   * NOTE(review): int16_t* read through int32_t* — strict-aliasing and
   * alignment assumption shared with the other dspr2 kernels. */
  filter45 = ((const int32_t *)filter)[0];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    vp9_prefetch_store(dst + dst_stride);

    for (x = 0; x < w; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__ (
          /* load the same 4 columns from two consecutive rows */
          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load2],     0(%[src_ptr])                   \n\t"

          "mtlo             %[vector4a],  $ac0                            \n\t"
          "mtlo             %[vector4a],  $ac1                            \n\t"
          "mtlo             %[vector4a],  $ac2                            \n\t"
          "mtlo             %[vector4a],  $ac3                            \n\t"
          "mthi             $zero,        $ac0                            \n\t"
          "mthi             $zero,        $ac1                            \n\t"
          "mthi             $zero,        $ac2                            \n\t"
          "mthi             $zero,        $ac3                            \n\t"

          /* low halves: columns 0 and 1 */
          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"

          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"

          /* high halves: columns 2 and 3 */
          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"

          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"

          /* extract, clamp via cropTbl, and store the 4 pixels */
          "extp             %[Temp1],     $ac0,           31              \n\t"
          "extp             %[Temp2],     $ac1,           31              \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "extp             %[Temp1],     $ac2,           31              \n\t"

          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
          "extp             %[Temp2],     $ac3,           31              \n\t"

          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
          "sb               %[store2],    1(%[dst_ptr])                   \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"

          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
          "sb               %[store2],    3(%[dst_ptr])                   \n\t"

          : [load1] "=&r" (load1), [load2] "=&r" (load2),
            [p1] "=&r" (p1), [p2] "=&r" (p2),
            [scratch1] "=&r" (scratch1),
            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
            [store1] "=&r" (store1), [store2] "=&r" (store2),
            [src_ptr] "+r" (src_ptr)
          : [filter45] "r" (filter45),[vector4a] "r" (vector4a),
            [src_stride] "r" (src_stride),
            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
      );
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
+
/* Bilinear (2-tap) vertical convolution specialized for w == 64.
 *
 * Identical inner loop to convolve_bi_vert_4_dspr2 but with the row
 * width hard-coded to 64, so the column loop bound is a constant.
 * 4 output pixels are produced per inline-asm iteration; see the w-
 * parameterized variant for a description of the DSP pipeline.
 */
static void convolve_bi_vert_64_dspr2(const uint8_t *src,
                                      int32_t src_stride,
                                      uint8_t *dst,
                                      int32_t dst_stride,
                                      const int16_t *filter_y,
                                      int32_t h) {
  int32_t       x, y;
  const uint8_t *src_ptr;
  uint8_t       *dst_ptr;
  uint8_t       *cm = vp9_ff_cropTbl;
  uint32_t      vector4a = 64;   /* rounding bias for the accumulators */
  uint32_t      load1, load2;
  uint32_t      p1, p2;
  uint32_t      scratch1;
  uint32_t      store1, store2;
  int32_t       Temp1, Temp2;
  const int16_t *filter = &filter_y[3];
  uint32_t      filter45;

  /* Pack taps 3 and 4 (the non-zero bilinear taps) into one word.
   * NOTE(review): int16_t* read through int32_t* — strict-aliasing and
   * alignment assumption shared with the other dspr2 kernels. */
  filter45 = ((const int32_t *)filter)[0];

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    vp9_prefetch_store(dst + dst_stride);

    for (x = 0; x < 64; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__ (
          /* load the same 4 columns from two consecutive rows */
          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load2],     0(%[src_ptr])                   \n\t"

          "mtlo             %[vector4a],  $ac0                            \n\t"
          "mtlo             %[vector4a],  $ac1                            \n\t"
          "mtlo             %[vector4a],  $ac2                            \n\t"
          "mtlo             %[vector4a],  $ac3                            \n\t"
          "mthi             $zero,        $ac0                            \n\t"
          "mthi             $zero,        $ac1                            \n\t"
          "mthi             $zero,        $ac2                            \n\t"
          "mthi             $zero,        $ac3                            \n\t"

          /* low halves: columns 0 and 1 */
          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"

          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"

          /* high halves: columns 2 and 3 */
          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"

          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"

          /* extract, clamp via cropTbl, and store the 4 pixels */
          "extp             %[Temp1],     $ac0,           31              \n\t"
          "extp             %[Temp2],     $ac1,           31              \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "extp             %[Temp1],     $ac2,           31              \n\t"

          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
          "extp             %[Temp2],     $ac3,           31              \n\t"

          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
          "sb               %[store2],    1(%[dst_ptr])                   \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"

          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
          "sb               %[store2],    3(%[dst_ptr])                   \n\t"

          : [load1] "=&r" (load1), [load2] "=&r" (load2),
            [p1] "=&r" (p1), [p2] "=&r" (p2),
            [scratch1] "=&r" (scratch1),
            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
            [store1] "=&r" (store1), [store2] "=&r" (store2),
            [src_ptr] "+r" (src_ptr)
          : [filter45] "r" (filter45),[vector4a] "r" (vector4a),
            [src_stride] "r" (src_stride),
            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
      );
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
+
/* Dispatch the 2-tap vertical convolution to the dspr2 kernels.
 * Only the unscaled case (y_step_q4 == 16) has an optimized path;
 * anything else falls back to the C reference.
 */
void vp9_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y, int y_step_q4,
                              int w, int h) {
  uint32_t pos = 38;

  if (y_step_q4 != 16) {
    /* Scaled prediction: no dspr2 fast path. */
    vp9_convolve8_vert_c(src, src_stride,
                         dst, dst_stride,
                         filter_x, x_step_q4,
                         filter_y, y_step_q4,
                         w, h);
    return;
  }

  /* bit position for extract from acc */
  __asm__ __volatile__ (
    "wrdsp      %[pos],     1           \n\t"
    :
    : [pos] "r" (pos)
  );

  vp9_prefetch_store(dst);

  if (w == 4 || w == 8 || w == 16 || w == 32) {
    /* One kernel handles every width that is a multiple of 4. */
    convolve_bi_vert_4_dspr2(src, src_stride,
                             dst, dst_stride,
                             filter_y, w, h);
  } else if (w == 64) {
    vp9_prefetch_store(dst + 32);
    convolve_bi_vert_64_dspr2(src, src_stride,
                              dst, dst_stride,
                              filter_y, h);
  } else {
    vp9_convolve8_vert_c(src, src_stride,
                         dst, dst_stride,
                         filter_x, x_step_q4,
                         filter_y, y_step_q4,
                         w, h);
  }
}
+#endif
index 0930ad12362e671a251f9911961d318c37670373..da7f0fdbb7ebfbc7afe3129552521e32b8cf792d 100644 (file)
@@ -355,6 +355,12 @@ void vp9_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                      filter_x, x_step_q4,
                      filter_y, y_step_q4,
                      w, h);
+  } else if (((const int32_t *)filter_y)[0] == 0) {
+    vp9_convolve2_avg_vert_dspr2(src, src_stride,
+                                 dst, dst_stride,
+                                 filter_x, x_step_q4,
+                                 filter_y, y_step_q4,
+                                 w, h);
   } else {
     if (16 == y_step_q4) {
       uint32_t pos = 38;
index 37c665be91126e93d019d17e603c8d36bcee098d..69da1cfd6a15fbfa4c5a188a016bc27c3fafd516 100644 (file)
@@ -965,6 +965,12 @@ void vp9_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                      filter_x, x_step_q4,
                      filter_y, y_step_q4,
                      w, h);
+  } else if (((const int32_t *)filter_x)[0] == 0) {
+    vp9_convolve2_avg_horiz_dspr2(src, src_stride,
+                                  dst, dst_stride,
+                                  filter_x, x_step_q4,
+                                  filter_y, y_step_q4,
+                                  w, h);
   } else {
     if (16 == x_step_q4) {
       uint32_t pos = 38;
index 2c48bd03861a4f0ce1362774cffa958bcee0a319..126e05a672256809ae09d4a7a24146502691e086 100644 (file)
@@ -930,6 +930,21 @@ void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
   }
 }
 
+/* Copy a w x h block from src into dst while transposing it: source row y
+ * is written out as destination column y (dst_stride is the element stride
+ * between destination rows).  Used below for the pure-copy filter case
+ * (filter[3] == 0x80) of the two-pass convolve, where the intermediate
+ * buffer is kept in transposed layout with stride == intermediate_height.
+ * NOTE(review): no external declaration is visible from here — consider
+ * making this static if it is only used in this translation unit. */
+void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
+                           uint8_t *dst, ptrdiff_t dst_stride,
+                           int w, int h) {
+  int x, y;
+
+  for (y = 0; y < h; ++y) {
+    for (x = 0; x < w; ++x) {
+      /* source pixel (x, y) lands at transposed position (y, x) in dst */
+      dst[x * dst_stride] = src[x];
+    }
+
+    /* next source row; advance dst by one element, i.e. one column */
+    src += src_stride;
+    dst += 1;
+  }
+}
+
 void vp9_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                          uint8_t *dst, ptrdiff_t dst_stride,
                          const int16_t *filter_x, int x_step_q4,
@@ -966,20 +981,14 @@ void vp9_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
   /* copy the src to dst */
   if (filter_x[3] == 0x80) {
-    int32_t y;
-    int32_t c;
-    const uint8_t *src_ptr = src - src_stride * 3;
-    uint8_t *dst_ptr = temp;
-
-    for (y = intermediate_height; y--;) {
-      for (c = 0; c < w; c++) {
-        dst_ptr[c * intermediate_height] = src_ptr[c];
-      }
-
-      /* next row... */
-      src_ptr += src_stride;
-      dst_ptr += 1;
-    }
+    copy_horiz_transposed(src - src_stride * 3, src_stride,
+                          temp, intermediate_height,
+                          w, intermediate_height);
+  } else if (((const int32_t *)filter_x)[0] == 0) {
+    vp9_convolve2_dspr2(src - src_stride * 3, src_stride,
+                        temp, intermediate_height,
+                        filter_x,
+                        w, intermediate_height);
   } else {
     src -= (src_stride * 3 + 3);
 
@@ -1021,20 +1030,14 @@ void vp9_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
 
   /* copy the src to dst */
   if (filter_y[3] == 0x80) {
-    int32_t y;
-    int32_t c;
-    uint8_t *src_ptr = temp + 3;
-    uint8_t *dst_ptr = dst;
-
-    for (y = w; y--;) {
-      for (c = 0; c < h; c++) {
-        dst_ptr[c * dst_stride] = src_ptr[c];
-      }
-
-      /* next row... */
-      src_ptr += intermediate_height;
-      dst_ptr += 1;
-    }
+    copy_horiz_transposed(temp + 3, intermediate_height,
+                          dst, dst_stride,
+                          h, w);
+  } else if (((const int32_t *)filter_y)[0] == 0) {
+    vp9_convolve2_dspr2(temp + 3, intermediate_height,
+                        dst, dst_stride,
+                        filter_y,
+                        h, w);
   } else {
     switch (h) {
       case 4:
index 743d641162704045aa63a2579a187605d898b65b..0303896156b33fde1af41d267efeb6b5229f2e84 100644 (file)
@@ -849,6 +849,12 @@ void vp9_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                       filter_x, x_step_q4,
                       filter_y, y_step_q4,
                       w, h);
+  } else if (((const int32_t *)filter_x)[0] == 0) {
+    vp9_convolve2_horiz_dspr2(src, src_stride,
+                              dst, dst_stride,
+                              filter_x, x_step_q4,
+                              filter_y, y_step_q4,
+                              w, h);
   } else {
     if (16 == x_step_q4) {
       uint32_t pos = 38;
index bdc7930b78b14deb8b7b1edecae8f4bd63610917..0930bb3d83e0d46dff893641579a74425b7c676e 100644 (file)
@@ -341,6 +341,12 @@ void vp9_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                       filter_x, x_step_q4,
                       filter_y, y_step_q4,
                       w, h);
+  } else if (((const int32_t *)filter_y)[0] == 0) {
+    vp9_convolve2_vert_dspr2(src, src_stride,
+                             dst, dst_stride,
+                             filter_x, x_step_q4,
+                             filter_y, y_step_q4,
+                             w, h);
   } else {
     if (16 == y_step_q4) {
       uint32_t pos = 38;
index 10fa461d83529eaf1c7a91d2dd6150c88e0aea00..99b21e2d520a1ebcc0577dc23ee99aba44b293ec 100644 (file)
@@ -89,6 +89,11 @@ endif
 
 # common (c)
 VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_common_dspr2.h
+VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_convolve2_avg_dspr2.c
+VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_convolve2_avg_horiz_dspr2.c
+VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_convolve2_dspr2.c
+VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_convolve2_horiz_dspr2.c
+VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_convolve2_vert_dspr2.c
 VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_convolve8_avg_dspr2.c
 VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_convolve8_avg_horiz_dspr2.c
 VP9_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/vp9_convolve8_dspr2.c