/*
 * vp8/encoder/generic/csystemdependent.c -- libvpx (WebM project)
 * Change context: safety check to avoid divide by 0s.
 */
1 /*
2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10
11
12 #include "vpx_ports/config.h"
13 #include "variance.h"
14 #include "onyx_int.h"
15
16
/* Platform-specific initializer; fills in SIMD-optimized function pointers
 * on x86/x86-64 (defined in the x86-specific encoder sources). */
void vp8_arch_x86_encoder_init(VP8_COMP *cpi);


/* Runtime-dispatched quantizer entry point; defaults to the C version below. */
void (*vp8_fast_quantize_b)(BLOCK *b, BLOCKD *d);
extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d);

/* Runtime-dispatched partial-frame copy (copies 1/Fraction of the frame);
 * defaults to the generic C implementation declared below. */
void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);
extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction);

26 void vp8_cmachine_specific_config(VP8_COMP *cpi)
27 {
28 #if CONFIG_RUNTIME_CPU_DETECT
29     cpi->rtcd.common                    = &cpi->common.rtcd;
30     cpi->rtcd.variance.sad16x16              = vp8_sad16x16_c;
31     cpi->rtcd.variance.sad16x8               = vp8_sad16x8_c;
32     cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c;
33     cpi->rtcd.variance.sad8x8                = vp8_sad8x8_c;
34     cpi->rtcd.variance.sad4x4                = vp8_sad4x4_c;
35
36     cpi->rtcd.variance.sad16x16x3            = vp8_sad16x16x3_c;
37     cpi->rtcd.variance.sad16x8x3             = vp8_sad16x8x3_c;
38     cpi->rtcd.variance.sad8x16x3             = vp8_sad8x16x3_c;
39     cpi->rtcd.variance.sad8x8x3              = vp8_sad8x8x3_c;
40     cpi->rtcd.variance.sad4x4x3              = vp8_sad4x4x3_c;
41
42     cpi->rtcd.variance.sad16x16x4d           = vp8_sad16x16x4d_c;
43     cpi->rtcd.variance.sad16x8x4d            = vp8_sad16x8x4d_c;
44     cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_c;
45     cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_c;
46     cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_c;
47
48     cpi->rtcd.variance.var4x4                = vp8_variance4x4_c;
49     cpi->rtcd.variance.var8x8                = vp8_variance8x8_c;
50     cpi->rtcd.variance.var8x16               = vp8_variance8x16_c;
51     cpi->rtcd.variance.var16x8               = vp8_variance16x8_c;
52     cpi->rtcd.variance.var16x16              = vp8_variance16x16_c;
53
54     cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c;
55     cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_c;
56     cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c;
57     cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c;
58     cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_c;
59     cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_c;
60
61     cpi->rtcd.variance.mse16x16              = vp8_mse16x16_c;
62     cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c;
63
64     cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_c;
65     cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c;
66     cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;;
67     cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c;
68
69     cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_c;
70     cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_c;
71     cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_c;
72     cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_c;
73     cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_c;
74
75     cpi->rtcd.encodemb.berr                  = vp8_block_error_c;
76     cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c;
77     cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c;
78     cpi->rtcd.encodemb.subb                  = vp8_subtract_b_c;
79     cpi->rtcd.encodemb.submby                = vp8_subtract_mby_c;
80     cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_c;
81
82     cpi->rtcd.quantize.quantb                = vp8_regular_quantize_b;
83     cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_c;
84
85     cpi->rtcd.search.full_search             = vp8_full_search_sad;
86     cpi->rtcd.search.diamond_search          = vp8_diamond_search_sad;
87 #endif
88
89     // Pure C:
90     vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
91
92
93 #if ARCH_X86 || ARCH_X86_64
94     vp8_arch_x86_encoder_init(cpi);
95 #endif
96
97 }