2 * Copyright 2011 The LibYuv Project Authors. All rights reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
11 #include "libyuv/scale.h"
16 #include "libyuv/cpu_id.h"
17 #include "libyuv/planar_functions.h" // For CopyARGB
18 #include "libyuv/row.h"
19 #include "libyuv/scale_row.h"
// Returns the absolute value of v.
// Note: negating INT_MIN overflows (undefined behavior), same as the
// original implementation; inputs here are image dimensions.
static __inline int Abs(int v) {
  return (v < 0) ? -v : v;
}
30 // ScaleARGB ARGB, 1/2
31 // This is an optimized version for scaling down a ARGB to 1/2 of
33 static void ScaleARGBDown2(int src_width, int src_height,
34 int dst_width, int dst_height,
35 int src_stride, int dst_stride,
36 const uint8* src_argb, uint8* dst_argb,
37 int x, int dx, int y, int dy,
38 enum FilterMode filtering) {
40 int row_stride = src_stride * (dy >> 16);
41 void (*ScaleARGBRowDown2)(const uint8* src_argb, ptrdiff_t src_stride,
42 uint8* dst_argb, int dst_width) =
43 filtering == kFilterNone ? ScaleARGBRowDown2_C :
44 (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_C :
45 ScaleARGBRowDown2Box_C);
46 assert(dx == 65536 * 2); // Test scale factor of 2.
47 assert((dy & 0x1ffff) == 0); // Test vertical scale is multiple of 2.
48 // Advance to odd row, even column.
49 if (filtering == kFilterBilinear) {
50 src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
52 src_argb += (y >> 16) * src_stride + ((x >> 16) - 1) * 4;
55 #if defined(HAS_SCALEARGBROWDOWN2_SSE2)
56 if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 4)) {
57 ScaleARGBRowDown2 = filtering == kFilterNone ? ScaleARGBRowDown2_SSE2 :
58 (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_SSE2 :
59 ScaleARGBRowDown2Box_SSE2);
62 #if defined(HAS_SCALEARGBROWDOWN2_NEON)
63 if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(dst_width, 8)) {
64 ScaleARGBRowDown2 = filtering ? ScaleARGBRowDown2Box_NEON :
65 ScaleARGBRowDown2_NEON;
69 if (filtering == kFilterLinear) {
72 for (j = 0; j < dst_height; ++j) {
73 ScaleARGBRowDown2(src_argb, src_stride, dst_argb, dst_width);
74 src_argb += row_stride;
75 dst_argb += dst_stride;
79 // ScaleARGB ARGB, 1/4
80 // This is an optimized version for scaling down a ARGB to 1/4 of
82 static void ScaleARGBDown4Box(int src_width, int src_height,
83 int dst_width, int dst_height,
84 int src_stride, int dst_stride,
85 const uint8* src_argb, uint8* dst_argb,
86 int x, int dx, int y, int dy) {
88 // Allocate 2 rows of ARGB.
89 const int kRowSize = (dst_width * 2 * 4 + 15) & ~15;
90 align_buffer_64(row, kRowSize * 2);
91 int row_stride = src_stride * (dy >> 16);
92 void (*ScaleARGBRowDown2)(const uint8* src_argb, ptrdiff_t src_stride,
93 uint8* dst_argb, int dst_width) = ScaleARGBRowDown2Box_C;
94 // Advance to odd row, even column.
95 src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
96 assert(dx == 65536 * 4); // Test scale factor of 4.
97 assert((dy & 0x3ffff) == 0); // Test vertical scale is multiple of 4.
98 #if defined(HAS_SCALEARGBROWDOWN2_SSE2)
99 if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 4)) {
100 ScaleARGBRowDown2 = ScaleARGBRowDown2Box_SSE2;
103 #if defined(HAS_SCALEARGBROWDOWN2_NEON)
104 if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(dst_width, 8)) {
105 ScaleARGBRowDown2 = ScaleARGBRowDown2Box_NEON;
108 for (j = 0; j < dst_height; ++j) {
109 ScaleARGBRowDown2(src_argb, src_stride, row, dst_width * 2);
110 ScaleARGBRowDown2(src_argb + src_stride * 2, src_stride,
111 row + kRowSize, dst_width * 2);
112 ScaleARGBRowDown2(row, kRowSize, dst_argb, dst_width);
113 src_argb += row_stride;
114 dst_argb += dst_stride;
116 free_aligned_buffer_64(row);
119 // ScaleARGB ARGB Even
120 // This is an optimized version for scaling down a ARGB to even
121 // multiple of its original size.
122 static void ScaleARGBDownEven(int src_width, int src_height,
123 int dst_width, int dst_height,
124 int src_stride, int dst_stride,
125 const uint8* src_argb, uint8* dst_argb,
126 int x, int dx, int y, int dy,
127 enum FilterMode filtering) {
129 int col_step = dx >> 16;
130 int row_stride = (dy >> 16) * src_stride;
131 void (*ScaleARGBRowDownEven)(const uint8* src_argb, ptrdiff_t src_stride,
132 int src_step, uint8* dst_argb, int dst_width) =
133 filtering ? ScaleARGBRowDownEvenBox_C : ScaleARGBRowDownEven_C;
134 assert(IS_ALIGNED(src_width, 2));
135 assert(IS_ALIGNED(src_height, 2));
136 src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
137 #if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
138 if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 4)) {
139 ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_SSE2 :
140 ScaleARGBRowDownEven_SSE2;
143 #if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
144 if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(dst_width, 4)) {
145 ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_NEON :
146 ScaleARGBRowDownEven_NEON;
150 if (filtering == kFilterLinear) {
153 for (j = 0; j < dst_height; ++j) {
154 ScaleARGBRowDownEven(src_argb, src_stride, col_step, dst_argb, dst_width);
155 src_argb += row_stride;
156 dst_argb += dst_stride;
160 // Scale ARGB down with bilinear interpolation.
161 static void ScaleARGBBilinearDown(int src_width, int src_height,
162 int dst_width, int dst_height,
163 int src_stride, int dst_stride,
164 const uint8* src_argb, uint8* dst_argb,
165 int x, int dx, int y, int dy,
166 enum FilterMode filtering) {
168 void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb,
169 ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
171 void (*ScaleARGBFilterCols)(uint8* dst_argb, const uint8* src_argb,
172 int dst_width, int x, int dx) =
173 (src_width >= 32768) ? ScaleARGBFilterCols64_C : ScaleARGBFilterCols_C;
174 int64 xlast = x + (int64)(dst_width - 1) * dx;
175 int64 xl = (dx >= 0) ? x : xlast;
176 int64 xr = (dx >= 0) ? xlast : x;
178 xl = (xl >> 16) & ~3; // Left edge aligned.
179 xr = (xr >> 16) + 1; // Right most pixel used. Bilinear uses 2 pixels.
180 xr = (xr + 1 + 3) & ~3; // 1 beyond 4 pixel aligned right most pixel.
181 if (xr > src_width) {
184 clip_src_width = (int)(xr - xl) * 4; // Width aligned to 4.
186 x -= (int)(xl << 16);
187 #if defined(HAS_INTERPOLATEROW_SSE2)
188 if (TestCpuFlag(kCpuHasSSE2)) {
189 InterpolateRow = InterpolateRow_Any_SSE2;
190 if (IS_ALIGNED(clip_src_width, 16)) {
191 InterpolateRow = InterpolateRow_SSE2;
195 #if defined(HAS_INTERPOLATEROW_SSSE3)
196 if (TestCpuFlag(kCpuHasSSSE3)) {
197 InterpolateRow = InterpolateRow_Any_SSSE3;
198 if (IS_ALIGNED(clip_src_width, 16)) {
199 InterpolateRow = InterpolateRow_SSSE3;
203 #if defined(HAS_INTERPOLATEROW_AVX2)
204 if (TestCpuFlag(kCpuHasAVX2)) {
205 InterpolateRow = InterpolateRow_Any_AVX2;
206 if (IS_ALIGNED(clip_src_width, 32)) {
207 InterpolateRow = InterpolateRow_AVX2;
211 #if defined(HAS_INTERPOLATEROW_NEON)
212 if (TestCpuFlag(kCpuHasNEON)) {
213 InterpolateRow = InterpolateRow_Any_NEON;
214 if (IS_ALIGNED(clip_src_width, 16)) {
215 InterpolateRow = InterpolateRow_NEON;
219 #if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
220 if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
221 IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4)) {
222 InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
223 if (IS_ALIGNED(clip_src_width, 4)) {
224 InterpolateRow = InterpolateRow_MIPS_DSPR2;
228 #if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
229 if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
230 ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
233 // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear.
234 // Allocate a row of ARGB.
236 align_buffer_64(row, clip_src_width * 4);
238 const int max_y = (src_height - 1) << 16;
242 for (j = 0; j < dst_height; ++j) {
244 const uint8* src = src_argb + yi * src_stride;
245 if (filtering == kFilterLinear) {
246 ScaleARGBFilterCols(dst_argb, src, dst_width, x, dx);
248 int yf = (y >> 8) & 255;
249 InterpolateRow(row, src, src_stride, clip_src_width, yf);
250 ScaleARGBFilterCols(dst_argb, row, dst_width, x, dx);
252 dst_argb += dst_stride;
258 free_aligned_buffer_64(row);
262 // Scale ARGB up with bilinear interpolation.
263 static void ScaleARGBBilinearUp(int src_width, int src_height,
264 int dst_width, int dst_height,
265 int src_stride, int dst_stride,
266 const uint8* src_argb, uint8* dst_argb,
267 int x, int dx, int y, int dy,
268 enum FilterMode filtering) {
270 void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb,
271 ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
273 void (*ScaleARGBFilterCols)(uint8* dst_argb, const uint8* src_argb,
274 int dst_width, int x, int dx) =
275 filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
276 const int max_y = (src_height - 1) << 16;
277 #if defined(HAS_INTERPOLATEROW_SSE2)
278 if (TestCpuFlag(kCpuHasSSE2)) {
279 InterpolateRow = InterpolateRow_Any_SSE2;
280 if (IS_ALIGNED(dst_width, 4)) {
281 InterpolateRow = InterpolateRow_SSE2;
285 #if defined(HAS_INTERPOLATEROW_SSSE3)
286 if (TestCpuFlag(kCpuHasSSSE3)) {
287 InterpolateRow = InterpolateRow_Any_SSSE3;
288 if (IS_ALIGNED(dst_width, 4)) {
289 InterpolateRow = InterpolateRow_SSSE3;
293 #if defined(HAS_INTERPOLATEROW_AVX2)
294 if (TestCpuFlag(kCpuHasAVX2)) {
295 InterpolateRow = InterpolateRow_Any_AVX2;
296 if (IS_ALIGNED(dst_width, 8)) {
297 InterpolateRow = InterpolateRow_AVX2;
301 #if defined(HAS_INTERPOLATEROW_NEON)
302 if (TestCpuFlag(kCpuHasNEON)) {
303 InterpolateRow = InterpolateRow_Any_NEON;
304 if (IS_ALIGNED(dst_width, 4)) {
305 InterpolateRow = InterpolateRow_NEON;
309 #if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
310 if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
311 IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) {
312 InterpolateRow = InterpolateRow_MIPS_DSPR2;
315 if (src_width >= 32768) {
316 ScaleARGBFilterCols = filtering ?
317 ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
319 #if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
320 if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
321 ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
324 #if defined(HAS_SCALEARGBCOLS_SSE2)
325 if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
326 ScaleARGBFilterCols = ScaleARGBCols_SSE2;
329 if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
330 ScaleARGBFilterCols = ScaleARGBColsUp2_C;
331 #if defined(HAS_SCALEARGBCOLSUP2_SSE2)
332 if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
333 ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
344 const uint8* src = src_argb + yi * src_stride;
346 // Allocate 2 rows of ARGB.
347 const int kRowSize = (dst_width * 4 + 15) & ~15;
348 align_buffer_64(row, kRowSize * 2);
351 int rowstride = kRowSize;
354 ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
355 if (src_height > 1) {
358 ScaleARGBFilterCols(rowptr + rowstride, src, dst_width, x, dx);
361 for (j = 0; j < dst_height; ++j) {
367 src = src_argb + yi * src_stride;
370 ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
372 rowstride = -rowstride;
377 if (filtering == kFilterLinear) {
378 InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
380 int yf = (y >> 8) & 255;
381 InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
383 dst_argb += dst_stride;
386 free_aligned_buffer_64(row);
391 // Scale YUV to ARGB up with bilinear interpolation.
392 static void ScaleYUVToARGBBilinearUp(int src_width, int src_height,
393 int dst_width, int dst_height,
402 int x, int dx, int y, int dy,
403 enum FilterMode filtering) {
405 void (*I422ToARGBRow)(const uint8* y_buf,
409 int width) = I422ToARGBRow_C;
410 #if defined(HAS_I422TOARGBROW_SSSE3)
411 if (TestCpuFlag(kCpuHasSSSE3)) {
412 I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
413 if (IS_ALIGNED(src_width, 8)) {
414 I422ToARGBRow = I422ToARGBRow_SSSE3;
418 #if defined(HAS_I422TOARGBROW_AVX2)
419 if (TestCpuFlag(kCpuHasAVX2)) {
420 I422ToARGBRow = I422ToARGBRow_Any_AVX2;
421 if (IS_ALIGNED(src_width, 16)) {
422 I422ToARGBRow = I422ToARGBRow_AVX2;
426 #if defined(HAS_I422TOARGBROW_NEON)
427 if (TestCpuFlag(kCpuHasNEON)) {
428 I422ToARGBRow = I422ToARGBRow_Any_NEON;
429 if (IS_ALIGNED(src_width, 8)) {
430 I422ToARGBRow = I422ToARGBRow_NEON;
434 #if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
435 if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_width, 4) &&
436 IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
437 IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
438 IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
439 IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
440 I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
444 void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb,
445 ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
447 #if defined(HAS_INTERPOLATEROW_SSE2)
448 if (TestCpuFlag(kCpuHasSSE2)) {
449 InterpolateRow = InterpolateRow_Any_SSE2;
450 if (IS_ALIGNED(dst_width, 4)) {
451 InterpolateRow = InterpolateRow_SSE2;
455 #if defined(HAS_INTERPOLATEROW_SSSE3)
456 if (TestCpuFlag(kCpuHasSSSE3)) {
457 InterpolateRow = InterpolateRow_Any_SSSE3;
458 if (IS_ALIGNED(dst_width, 4)) {
459 InterpolateRow = InterpolateRow_SSSE3;
463 #if defined(HAS_INTERPOLATEROW_AVX2)
464 if (TestCpuFlag(kCpuHasAVX2)) {
465 InterpolateRow = InterpolateRow_Any_AVX2;
466 if (IS_ALIGNED(dst_width, 8)) {
467 InterpolateRow = InterpolateRow_AVX2;
471 #if defined(HAS_INTERPOLATEROW_NEON)
472 if (TestCpuFlag(kCpuHasNEON)) {
473 InterpolateRow = InterpolateRow_Any_NEON;
474 if (IS_ALIGNED(dst_width, 4)) {
475 InterpolateRow = InterpolateRow_NEON;
479 #if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
480 if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
481 IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
482 InterpolateRow = InterpolateRow_MIPS_DSPR2;
486 void (*ScaleARGBFilterCols)(uint8* dst_argb, const uint8* src_argb,
487 int dst_width, int x, int dx) =
488 filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
489 if (src_width >= 32768) {
490 ScaleARGBFilterCols = filtering ?
491 ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
493 #if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
494 if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
495 ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
498 #if defined(HAS_SCALEARGBCOLS_SSE2)
499 if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
500 ScaleARGBFilterCols = ScaleARGBCols_SSE2;
503 if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
504 ScaleARGBFilterCols = ScaleARGBColsUp2_C;
505 #if defined(HAS_SCALEARGBCOLSUP2_SSE2)
506 if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
507 ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
512 const int max_y = (src_height - 1) << 16;
516 const int kYShift = 1; // Shift Y by 1 to convert Y plane to UV coordinate.
518 int uv_yi = yi >> kYShift;
519 const uint8* src_row_y = src_y + yi * src_stride_y;
520 const uint8* src_row_u = src_u + uv_yi * src_stride_u;
521 const uint8* src_row_v = src_v + uv_yi * src_stride_v;
523 // Allocate 2 rows of ARGB.
524 const int kRowSize = (dst_width * 4 + 15) & ~15;
525 align_buffer_64(row, kRowSize * 2);
527 // Allocate 1 row of ARGB for source conversion.
528 align_buffer_64(argb_row, src_width * 4);
531 int rowstride = kRowSize;
534 // TODO(fbarchard): Convert first 2 rows of YUV to ARGB.
535 ScaleARGBFilterCols(rowptr, src_row_y, dst_width, x, dx);
536 if (src_height > 1) {
537 src_row_y += src_stride_y;
539 src_row_u += src_stride_u;
540 src_row_v += src_stride_v;
543 ScaleARGBFilterCols(rowptr + rowstride, src_row_y, dst_width, x, dx);
544 if (src_height > 2) {
545 src_row_y += src_stride_y;
547 src_row_u += src_stride_u;
548 src_row_v += src_stride_v;
552 for (j = 0; j < dst_height; ++j) {
558 uv_yi = yi >> kYShift;
559 src_row_y = src_y + yi * src_stride_y;
560 src_row_u = src_u + uv_yi * src_stride_u;
561 src_row_v = src_v + uv_yi * src_stride_v;
564 // TODO(fbarchard): Convert the clipped region of row.
565 I422ToARGBRow(src_row_y, src_row_u, src_row_v, argb_row, src_width);
566 ScaleARGBFilterCols(rowptr, argb_row, dst_width, x, dx);
568 rowstride = -rowstride;
570 src_row_y += src_stride_y;
572 src_row_u += src_stride_u;
573 src_row_v += src_stride_v;
577 if (filtering == kFilterLinear) {
578 InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
580 int yf = (y >> 8) & 255;
581 InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
583 dst_argb += dst_stride_argb;
586 free_aligned_buffer_64(row);
587 free_aligned_buffer_64(row_argb);
591 // Scale ARGB to/from any dimensions, without interpolation.
592 // Fixed point math is used for performance: The upper 16 bits
593 // of x and dx is the integer part of the source position and
594 // the lower 16 bits are the fixed decimal part.
596 static void ScaleARGBSimple(int src_width, int src_height,
597 int dst_width, int dst_height,
598 int src_stride, int dst_stride,
599 const uint8* src_argb, uint8* dst_argb,
600 int x, int dx, int y, int dy) {
602 void (*ScaleARGBCols)(uint8* dst_argb, const uint8* src_argb,
603 int dst_width, int x, int dx) =
604 (src_width >= 32768) ? ScaleARGBCols64_C : ScaleARGBCols_C;
605 #if defined(HAS_SCALEARGBCOLS_SSE2)
606 if (TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
607 ScaleARGBCols = ScaleARGBCols_SSE2;
610 if (src_width * 2 == dst_width && x < 0x8000) {
611 ScaleARGBCols = ScaleARGBColsUp2_C;
612 #if defined(HAS_SCALEARGBCOLSUP2_SSE2)
613 if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
614 ScaleARGBCols = ScaleARGBColsUp2_SSE2;
619 for (j = 0; j < dst_height; ++j) {
620 ScaleARGBCols(dst_argb, src_argb + (y >> 16) * src_stride,
622 dst_argb += dst_stride;
628 // This function in turn calls a scaling function
629 // suitable for handling the desired resolutions.
630 static void ScaleARGB(const uint8* src, int src_stride,
631 int src_width, int src_height,
632 uint8* dst, int dst_stride,
633 int dst_width, int dst_height,
634 int clip_x, int clip_y, int clip_width, int clip_height,
635 enum FilterMode filtering) {
636 // Initial source x/y coordinate and step values as 16.16 fixed point.
641 // ARGB does not support box filter yet, but allow the user to pass it.
642 // Simplify filtering when possible.
643 filtering = ScaleFilterReduce(src_width, src_height,
644 dst_width, dst_height,
647 // Negative src_height means invert the image.
648 if (src_height < 0) {
649 src_height = -src_height;
650 src = src + (src_height - 1) * src_stride;
651 src_stride = -src_stride;
653 ScaleSlope(src_width, src_height, dst_width, dst_height, filtering,
655 src_width = Abs(src_width);
657 int64 clipf = (int64)(clip_x) * dx;
658 x += (clipf & 0xffff);
659 src += (clipf >> 16) * 4;
663 int64 clipf = (int64)(clip_y) * dy;
664 y += (clipf & 0xffff);
665 src += (clipf >> 16) * src_stride;
666 dst += clip_y * dst_stride;
669 // Special case for integer step values.
670 if (((dx | dy) & 0xffff) == 0) {
671 if (!dx || !dy) { // 1 pixel wide and/or tall.
672 filtering = kFilterNone;
674 // Optimized even scale down. ie 2, 4, 6, 8, 10x.
675 if (!(dx & 0x10000) && !(dy & 0x10000)) {
677 // Optimized 1/2 downsample.
678 ScaleARGBDown2(src_width, src_height,
679 clip_width, clip_height,
680 src_stride, dst_stride, src, dst,
681 x, dx, y, dy, filtering);
684 if (dx == 0x40000 && filtering == kFilterBox) {
685 // Optimized 1/4 box downsample.
686 ScaleARGBDown4Box(src_width, src_height,
687 clip_width, clip_height,
688 src_stride, dst_stride, src, dst,
692 ScaleARGBDownEven(src_width, src_height,
693 clip_width, clip_height,
694 src_stride, dst_stride, src, dst,
695 x, dx, y, dy, filtering);
698 // Optimized odd scale down. ie 3, 5, 7, 9x.
699 if ((dx & 0x10000) && (dy & 0x10000)) {
700 filtering = kFilterNone;
701 if (dx == 0x10000 && dy == 0x10000) {
703 ARGBCopy(src + (y >> 16) * src_stride + (x >> 16) * 4, src_stride,
704 dst, dst_stride, clip_width, clip_height);
710 if (dx == 0x10000 && (x & 0xffff) == 0) {
711 // Arbitrary scale vertically, but unscaled vertically.
712 ScalePlaneVertical(src_height,
713 clip_width, clip_height,
714 src_stride, dst_stride, src, dst,
715 x, y, dy, 4, filtering);
718 if (filtering && dy < 65536) {
719 ScaleARGBBilinearUp(src_width, src_height,
720 clip_width, clip_height,
721 src_stride, dst_stride, src, dst,
722 x, dx, y, dy, filtering);
726 ScaleARGBBilinearDown(src_width, src_height,
727 clip_width, clip_height,
728 src_stride, dst_stride, src, dst,
729 x, dx, y, dy, filtering);
732 ScaleARGBSimple(src_width, src_height, clip_width, clip_height,
733 src_stride, dst_stride, src, dst,
738 int ARGBScaleClip(const uint8* src_argb, int src_stride_argb,
739 int src_width, int src_height,
740 uint8* dst_argb, int dst_stride_argb,
741 int dst_width, int dst_height,
742 int clip_x, int clip_y, int clip_width, int clip_height,
743 enum FilterMode filtering) {
744 if (!src_argb || src_width == 0 || src_height == 0 ||
745 !dst_argb || dst_width <= 0 || dst_height <= 0 ||
746 clip_x < 0 || clip_y < 0 ||
747 (clip_x + clip_width) > dst_width ||
748 (clip_y + clip_height) > dst_height) {
751 ScaleARGB(src_argb, src_stride_argb, src_width, src_height,
752 dst_argb, dst_stride_argb, dst_width, dst_height,
753 clip_x, clip_y, clip_width, clip_height, filtering);
757 // Scale an ARGB image.
759 int ARGBScale(const uint8* src_argb, int src_stride_argb,
760 int src_width, int src_height,
761 uint8* dst_argb, int dst_stride_argb,
762 int dst_width, int dst_height,
763 enum FilterMode filtering) {
764 if (!src_argb || src_width == 0 || src_height == 0 ||
765 !dst_argb || dst_width <= 0 || dst_height <= 0) {
768 ScaleARGB(src_argb, src_stride_argb, src_width, src_height,
769 dst_argb, dst_stride_argb, dst_width, dst_height,
770 0, 0, dst_width, dst_height, filtering);
776 } // namespace libyuv