 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 * Based on code from the OggTheora software codec source code,
 * Copyright (C) 2002-2010 The Xiph.Org Foundation and contributors.
#include "vpx/vpx_integer.h"
// Reads 'size' bytes from 'file' into 'buf' with some fault tolerance.
// Returns true on success.
static int file_read(void *buf, size_t size, FILE *file) {
  const int kMaxRetries = 5;
    const size_t n = fread((uint8_t *)buf + len, 1, size - len, file);
    file_error = ferror(file);
      if (errno == EINTR || errno == EAGAIN) {
        fprintf(stderr, "Error reading file: %u of %u bytes read, %d: %s\n",
                (uint32_t)len, (uint32_t)size, errno, strerror(errno));
  } while (!feof(file) && len < size && ++retry_count < kMaxRetries);
  if (!feof(file) && len != size) {
            "Error reading file: %u of %u bytes read,"
            " error: %d, retries: %d, %d: %s\n",
            (uint32_t)len, (uint32_t)size, file_error, retry_count, errno,
static int y4m_parse_tags(y4m_input *_y4m, char *_tags) {
  got_w = got_h = got_fps = got_interlace = got_par = got_chroma = 0;
  for (p = _tags;; p = q) {
    /*Skip any leading spaces.*/
    while (*p == ' ') p++;
    /*If that's all we have, stop.*/
    if (p[0] == '\0') break;
    /*Find the end of this tag.*/
    for (q = p + 1; *q != '\0' && *q != ' '; q++) {
        if (sscanf(p + 1, "%d", &_y4m->pic_w) != 1) return -1;
        if (sscanf(p + 1, "%d", &_y4m->pic_h) != 1) return -1;
        if (sscanf(p + 1, "%d:%d", &_y4m->fps_n, &_y4m->fps_d) != 2) {
        _y4m->interlace = p[1];
        if (sscanf(p + 1, "%d:%d", &_y4m->par_n, &_y4m->par_d) != 2) {
        if (q - p > 16) return -1;
        memcpy(_y4m->chroma_type, p + 1, q - p - 1);
        _y4m->chroma_type[q - p - 1] = '\0';
      /*Ignore unknown tags.*/
  if (!got_w || !got_h || !got_fps) return -1;
  if (!got_interlace) _y4m->interlace = '?';
  if (!got_par) _y4m->par_n = _y4m->par_d = 0;
  /*Chroma-type is not specified in older files, e.g., those generated by
  if (!got_chroma) strcpy(_y4m->chroma_type, "420");
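
/*Worked example (added for illustration, not from the original source): a
  tag string such as " W352 H288 F30000:1001 Ip A128:117 C420jpeg" should
  leave pic_w=352, pic_h=288, fps_n=30000, fps_d=1001, interlace='p',
  par_n=128, par_d=117 and chroma_type="420jpeg"; any token whose leading
  character is not one of the handled tag letters is simply skipped.*/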
/*All anti-aliasing filters in the following conversion functions are based on
  one of two window functions:
  The 6-tap Lanczos window (for down-sampling and shifts):
   sinc(\pi*t)*sinc(\pi*t/3), |t|<3  (sinc(t)==sin(t)/t)
  The 4-tap Mitchell window (for up-sampling):
   7|t|^3-12|t|^2+16/3,            |t|<1
   -(7/3)|t|^3+12|t|^2-20|t|+32/3, 1<=|t|<2
  The number of taps is intentionally kept small to reduce computational
  overhead and limit ringing.
  The taps from these filters are scaled so that their sum is 1, and the result
  is scaled by 128 and rounded to integers to create a filter whose
  intermediate values fit inside 16 bits.
  Coefficients are rounded in such a way as to ensure their sum is still 128,
  which is usually equivalent to normal rounding.
  Conversions which require both horizontal and vertical filtering could
  have these steps pipelined, for less memory consumption and better cache
  performance, but we do them separately for simplicity.*/
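
/*Illustrative sketch (not part of the original code): one way the 6-tap
  Lanczos-derived integer filters used below could be generated.  For a
  fractional shift 'shift', the window is evaluated at the six integer
  offsets around the target site, the floating-point taps are normalized to
  sum to 1, scaled by 128 and rounded, and any rounding residual is folded
  into the largest tap so the integer sum stays exactly 128.  The helper
  names and the residual-correction strategy are assumptions; the comment
  above only states that the sum is kept at 128.*/
#include <math.h>

static double example_lanczos3(double t) {
  const double kPi = 3.14159265358979323846;
  if (t == 0) return 1.0;
  if (fabs(t) >= 3.0) return 0.0;
  return (sin(kPi * t) / (kPi * t)) * (sin(kPi * t / 3) / (kPi * t / 3));
}

static void example_make_shift_filter(double shift, int taps[6]) {
  double w[6];
  double sum = 0.0;
  int isum = 0;
  int imax = 0;
  int i;
  for (i = 0; i < 6; i++) {
    /*Taps correspond to source offsets -2..3 around the output site.*/
    w[i] = example_lanczos3((i - 2) - shift);
    sum += w[i];
  }
  for (i = 0; i < 6; i++) {
    taps[i] = (int)floor(128 * w[i] / sum + 0.5);
    isum += taps[i];
    if (fabs(w[i]) > fabs(w[imax])) imax = i;
  }
  /*E.g., shift == 0.25 should reproduce the quarter-pel filter
    [4 -17 114 35 -9 1] used by y4m_42xmpeg2_42xjpeg_helper() below.*/
  taps[imax] += 128 - isum;
}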
#define OC_MINI(_a, _b) ((_a) > (_b) ? (_b) : (_a))
#define OC_MAXI(_a, _b) ((_a) < (_b) ? (_b) : (_a))
#define OC_CLAMPI(_a, _b, _c) (OC_MAXI(_a, OC_MINI(_b, _c)))
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  420mpeg2 chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  We use a resampling filter to shift the site locations one quarter pixel (at
  the chroma plane's resolution) to the right.
  The 4:2:2 modes look exactly the same, except there are twice as many chroma
  lines, and they are vertically co-sited with the luma samples in both the
  mpeg2 and jpeg cases (thus requiring no vertical resampling).*/
static void y4m_42xmpeg2_42xjpeg_helper(unsigned char *_dst,
                                        const unsigned char *_src, int _c_w,
  for (y = 0; y < _c_h; y++) {
    /*Filter: [4 -17 114 35 -9 1]/128, derived from a 6-tap Lanczos window.*/
    for (x = 0; x < OC_MINI(_c_w, 2); x++) {
      _dst[x] = (unsigned char)OC_CLAMPI(
          0, (4 * _src[0] - 17 * _src[OC_MAXI(x - 1, 0)] + 114 * _src[x] +
              35 * _src[OC_MINI(x + 1, _c_w - 1)] -
              9 * _src[OC_MINI(x + 2, _c_w - 1)] +
              _src[OC_MINI(x + 3, _c_w - 1)] + 64) >>
    for (; x < _c_w - 3; x++) {
      _dst[x] = (unsigned char)OC_CLAMPI(
          0, (4 * _src[x - 2] - 17 * _src[x - 1] + 114 * _src[x] +
              35 * _src[x + 1] - 9 * _src[x + 2] + _src[x + 3] + 64) >>
    for (; x < _c_w; x++) {
      _dst[x] = (unsigned char)OC_CLAMPI(
          0, (4 * _src[x - 2] - 17 * _src[x - 1] + 114 * _src[x] +
              35 * _src[OC_MINI(x + 1, _c_w - 1)] -
              9 * _src[OC_MINI(x + 2, _c_w - 1)] + _src[_c_w - 1] + 64) >>
/*Handles both 422 and 420mpeg2 to 422jpeg and 420jpeg, respectively.*/
static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
                                         unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  for (pli = 1; pli < 3; pli++) {
    y4m_42xmpeg2_42xjpeg_helper(_dst, _aux, c_w, c_h);
/*This format is only used for interlaced content, but is included for
  completeness.

  420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  420paldv chroma samples are sited like:
  YR------Y-------YR------Y-------
  YB------Y-------YB------Y-------
  YR------Y-------YR------Y-------
  YB------Y-------YB------Y-------

  We use a resampling filter to shift the site locations one quarter pixel (at
  the chroma plane's resolution) to the right.
  Then we use another filter to move the C_r location down one quarter pixel,
  and the C_b location up one quarter pixel.*/
static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
                                         unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + 1) / 2;
  c_h = (_y4m->pic_h + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  tmp = _aux + 2 * c_sz;
  for (pli = 1; pli < 3; pli++) {
    /*First do the horizontal re-sampling.
      This is the same as the mpeg2 case, except that after the horizontal
      case, we need to apply a second vertical filter.*/
    y4m_42xmpeg2_42xjpeg_helper(tmp, _aux, c_w, c_h);
    /*Slide C_b up a quarter-pel.
      This is the same filter used above, but in the other order.*/
    for (x = 0; x < c_w; x++) {
      for (y = 0; y < OC_MINI(c_h, 3); y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(
            0, (tmp[0] - 9 * tmp[OC_MAXI(y - 2, 0) * c_w] +
                35 * tmp[OC_MAXI(y - 1, 0) * c_w] + 114 * tmp[y * c_w] -
                17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] +
                4 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + 64) >>
      for (; y < c_h - 2; y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(
            0, (tmp[(y - 3) * c_w] - 9 * tmp[(y - 2) * c_w] +
                35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] -
                17 * tmp[(y + 1) * c_w] + 4 * tmp[(y + 2) * c_w] + 64) >>
      for (; y < c_h; y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(
            0, (tmp[(y - 3) * c_w] - 9 * tmp[(y - 2) * c_w] +
                35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] -
                17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] +
                4 * tmp[(c_h - 1) * c_w] + 64) >>
    /*Slide C_r down a quarter-pel.
      This is the same as the horizontal filter.*/
    for (x = 0; x < c_w; x++) {
      for (y = 0; y < OC_MINI(c_h, 2); y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(
            (4 * tmp[0] - 17 * tmp[OC_MAXI(y - 1, 0) * c_w] +
             114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] -
             9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] +
             tmp[OC_MINI(y + 3, c_h - 1) * c_w] + 64) >>
      for (; y < c_h - 3; y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(
            0, (4 * tmp[(y - 2) * c_w] - 17 * tmp[(y - 1) * c_w] +
                114 * tmp[y * c_w] + 35 * tmp[(y + 1) * c_w] -
                9 * tmp[(y + 2) * c_w] + tmp[(y + 3) * c_w] + 64) >>
      for (; y < c_h; y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(
            (4 * tmp[(y - 2) * c_w] - 17 * tmp[(y - 1) * c_w] +
             114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] -
             9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + tmp[(c_h - 1) * c_w] +
  /*For actual interlaced material, this would have to be done separately on
    each field, and the shift amounts would be different.
    C_r moves down 1/8, C_b up 3/8 in the top field, and C_r moves down 3/8,
    C_b up 1/8 in the bottom field.
    The corresponding filters would be:
    Down 1/8 (reverse order for up): [3 -11 125 15 -4 0]/128
    Down 3/8 (reverse order for up): [4 -19 98 56 -13 2]/128*/
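  /*Note (added for illustration): both of these tap sets come from the same
    6-tap Lanczos window described at the top of the file, evaluated at
    1/8-pel and 3/8-pel offsets; the example_make_shift_filter() sketch above
    with shift == 0.125 or 0.375 reproduces them up to the final
    sum-preserving rounding choice.*/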
/*Perform vertical filtering to reduce a single plane from 4:2:2 to 4:2:0.
  This is used as a helper by several conversion routines.*/
static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
                                       const unsigned char *_src, int _c_w,
  /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
  for (x = 0; x < _c_w; x++) {
    for (y = 0; y < OC_MINI(_c_h, 2); y += 2) {
      _dst[(y >> 1) * _c_w] =
          OC_CLAMPI(0, (64 * _src[0] + 78 * _src[OC_MINI(1, _c_h - 1) * _c_w] -
                        17 * _src[OC_MINI(2, _c_h - 1) * _c_w] +
                        3 * _src[OC_MINI(3, _c_h - 1) * _c_w] + 64) >>
    for (; y < _c_h - 3; y += 2) {
      _dst[(y >> 1) * _c_w] =
          OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] + _src[(y + 3) * _c_w]) -
                        17 * (_src[(y - 1) * _c_w] + _src[(y + 2) * _c_w]) +
                        78 * (_src[y * _c_w] + _src[(y + 1) * _c_w]) + 64) >>
    for (; y < _c_h; y += 2) {
      _dst[(y >> 1) * _c_w] = OC_CLAMPI(
          (3 * (_src[(y - 2) * _c_w] + _src[(_c_h - 1) * _c_w]) -
           17 * (_src[(y - 1) * _c_w] + _src[OC_MINI(y + 2, _c_h - 1) * _c_w]) +
           78 * (_src[y * _c_w] + _src[OC_MINI(y + 1, _c_h - 1) * _c_w]) +
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  422jpeg chroma samples are sited like:
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------

  We use a resampling filter to decimate the chroma planes by two in the
  vertical direction.*/
static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m, unsigned char *_dst,
                                        unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
  dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  dst_c_sz = dst_c_w * dst_c_h;
  for (pli = 1; pli < 3; pli++) {
    y4m_422jpeg_420jpeg_helper(_dst, _aux, c_w, c_h);
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  422 chroma samples are sited like:
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------

  We use a resampling filter to shift the original site locations one quarter
  pixel (at the original chroma resolution) to the right.
  Then we use a second resampling filter to decimate the chroma planes by two
  in the vertical direction.*/
static void y4m_convert_422_420jpeg(y4m_input *_y4m, unsigned char *_dst,
                                    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
  dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  dst_c_sz = c_w * dst_c_h;
  tmp = _aux + 2 * c_sz;
  for (pli = 1; pli < 3; pli++) {
    /*In reality, the horizontal and vertical steps could be pipelined, for
      less memory consumption and better cache performance, but we do them
      separately for simplicity.*/
    /*First do horizontal filtering (convert to 422jpeg)*/
    y4m_42xmpeg2_42xjpeg_helper(tmp, _aux, c_w, c_h);
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst, tmp, c_w, c_h);
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  411 chroma samples are sited like:
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------

  We use a filter to resample at site locations one eighth pixel (at the source
  chroma plane's horizontal resolution) and five eighths of a pixel to the
  right.
  Then we use another filter to decimate the planes by 2 in the vertical
  direction.*/
static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst,
                                    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
  dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  dst_c_sz = dst_c_w * dst_c_h;
  tmp_sz = dst_c_w * c_h;
  tmp = _aux + 2 * c_sz;
  for (pli = 1; pli < 3; pli++) {
    /*In reality, the horizontal and vertical steps could be pipelined, for
      less memory consumption and better cache performance, but we do them
      separately for simplicity.*/
    /*First do horizontal filtering (convert to 422jpeg)*/
    for (y = 0; y < c_h; y++) {
      /*Filters: [1 110 18 -1]/128 and [-3 50 86 -5]/128, both derived from a
        4-tap Mitchell window.*/
      for (x = 0; x < OC_MINI(c_w, 1); x++) {
        tmp[x << 1] = (unsigned char)OC_CLAMPI(
            0, (111 * _aux[0] + 18 * _aux[OC_MINI(1, c_w - 1)] -
                _aux[OC_MINI(2, c_w - 1)] + 64) >>
        tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(
            0, (47 * _aux[0] + 86 * _aux[OC_MINI(1, c_w - 1)] -
                5 * _aux[OC_MINI(2, c_w - 1)] + 64) >>
      for (; x < c_w - 2; x++) {
            (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x] +
                                         18 * _aux[x + 1] - _aux[x + 2] + 64) >>
        tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(
            0, (-3 * _aux[x - 1] + 50 * _aux[x] + 86 * _aux[x + 1] -
                5 * _aux[x + 2] + 64) >>
      for (; x < c_w; x++) {
        tmp[x << 1] = (unsigned char)OC_CLAMPI(
            0, (_aux[x - 1] + 110 * _aux[x] +
                18 * _aux[OC_MINI(x + 1, c_w - 1)] - _aux[c_w - 1] + 64) >>
        if ((x << 1 | 1) < dst_c_w) {
          tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(
              (-3 * _aux[x - 1] + 50 * _aux[x] +
               86 * _aux[OC_MINI(x + 1, c_w - 1)] - 5 * _aux[c_w - 1] + 64) >>
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst, tmp, dst_c_w, c_h);
/*Convert 444 to 420jpeg.*/
static void y4m_convert_444_420jpeg(y4m_input *_y4m, unsigned char *_dst,
                                    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
  dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  dst_c_sz = dst_c_w * dst_c_h;
  tmp_sz = dst_c_w * c_h;
  tmp = _aux + 2 * c_sz;
  for (pli = 1; pli < 3; pli++) {
    /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
    for (y = 0; y < c_h; y++) {
      for (x = 0; x < OC_MINI(c_w, 2); x += 2) {
            OC_CLAMPI(0, (64 * _aux[0] + 78 * _aux[OC_MINI(1, c_w - 1)] -
                          17 * _aux[OC_MINI(2, c_w - 1)] +
                          3 * _aux[OC_MINI(3, c_w - 1)] + 64) >>
      for (; x < c_w - 3; x += 2) {
        tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[x + 3]) -
                                    17 * (_aux[x - 1] + _aux[x + 2]) +
                                    78 * (_aux[x] + _aux[x + 1]) + 64) >>
      for (; x < c_w; x += 2) {
        tmp[x >> 1] = OC_CLAMPI(
            0, (3 * (_aux[x - 2] + _aux[c_w - 1]) -
                17 * (_aux[x - 1] + _aux[OC_MINI(x + 2, c_w - 1)]) +
                78 * (_aux[x] + _aux[OC_MINI(x + 1, c_w - 1)]) + 64) >>
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst, tmp, dst_c_w, c_h);
/*The image is padded with empty chroma components at 4:2:0.*/
static void y4m_convert_mono_420jpeg(y4m_input *_y4m, unsigned char *_dst,
                                     unsigned char *_aux) {
  _dst += _y4m->pic_w * _y4m->pic_h;
  c_sz = ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
         ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
  memset(_dst, 128, c_sz * 2);
/*No conversion function needed.*/
static void y4m_convert_null(y4m_input *_y4m, unsigned char *_dst,
                             unsigned char *_aux) {
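
/*Note (added commentary): y4m_input_open() below selects one of the
  y4m_convert_* callbacks along with dst_buf_read_sz and aux_buf_read_sz.
  y4m_input_fetch_frame() then reads dst_buf_read_sz bytes (the luma plane,
  plus any chroma that is already in the output format) straight into
  dst_buf, reads aux_buf_read_sz bytes of still-to-be-filtered chroma into
  aux_buf, and finally invokes the selected callback to write the converted
  chroma into dst_buf after the luma plane.*/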
int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
  char buffer[80] = { 0 };
  /*Read until newline, or 80 cols, whichever happens first.*/
  for (i = 0; i < 79; i++) {
      buffer[i] = *_skip++;
      if (!file_read(buffer + i, 1, _fin)) return -1;
    if (buffer[i] == '\n') break;
  /*We skipped too much header data.*/
  if (_nskip > 0) return -1;
    fprintf(stderr, "Error parsing header; not a YUV4MPEG2 file?\n");
  if (memcmp(buffer, "YUV4MPEG", 8)) {
    fprintf(stderr, "Incomplete magic for YUV4MPEG file.\n");
  if (buffer[8] != '2') {
    fprintf(stderr, "Incorrect YUV input file version; YUV4MPEG2 required.\n");
  ret = y4m_parse_tags(_y4m, buffer + 5);
    fprintf(stderr, "Error parsing YUV4MPEG2 header.\n");
  if (_y4m->interlace == '?') {
            "Warning: Input video interlacing format unknown; "
            "assuming progressive scan.\n");
  } else if (_y4m->interlace != 'p') {
            "Input video is interlaced; "
            "Only progressive scan handled.\n");
  _y4m->vpx_fmt = VPX_IMG_FMT_I420;
  if (strcmp(_y4m->chroma_type, "420") == 0 ||
      strcmp(_y4m->chroma_type, "420jpeg") == 0) {
    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
        _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz =
        _y4m->pic_w * _y4m->pic_h +
        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
    /* Natively supported: no conversion required. */
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
  } else if (strcmp(_y4m->chroma_type, "420p10") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->dst_c_dec_h = 2;
    _y4m->src_c_dec_v = 2;
    _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz =
        2 * (_y4m->pic_w * _y4m->pic_h +
             2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2));
    /* Natively supported: no conversion required. */
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
    _y4m->bit_depth = 10;
      _y4m->vpx_fmt = VPX_IMG_FMT_I42016;
      fprintf(stderr, "Unsupported conversion from 420p10 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "420p12") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->dst_c_dec_h = 2;
    _y4m->src_c_dec_v = 2;
    _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz =
        2 * (_y4m->pic_w * _y4m->pic_h +
             2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2));
    /* Natively supported: no conversion required. */
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
    _y4m->bit_depth = 12;
      _y4m->vpx_fmt = VPX_IMG_FMT_I42016;
      fprintf(stderr, "Unsupported conversion from 420p12 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "420mpeg2") == 0) {
    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
        _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz =
        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
    _y4m->convert = y4m_convert_42xmpeg2_42xjpeg;
  } else if (strcmp(_y4m->chroma_type, "420paldv") == 0) {
    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
        _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
      We need to make two filter passes, so we need some extra space in the
      aux buf.*/
    _y4m->aux_buf_sz = 3 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
    _y4m->aux_buf_read_sz =
        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
    _y4m->convert = y4m_convert_42xpaldv_42xjpeg;
  } else if (strcmp(_y4m->chroma_type, "422jpeg") == 0) {
    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
    _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz =
        2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
    _y4m->convert = y4m_convert_422jpeg_420jpeg;
  } else if (strcmp(_y4m->chroma_type, "422") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
      _y4m->dst_c_dec_h = 2;
      _y4m->dst_c_dec_v = 2;
      _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
      /*Chroma filter required: read into the aux buf first.
        We need to make two filter passes, so we need some extra space in the
        aux buf.*/
      _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
          _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
      _y4m->convert = y4m_convert_422_420jpeg;
      _y4m->vpx_fmt = VPX_IMG_FMT_I422;
      _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
      _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
      _y4m->dst_buf_read_sz =
          _y4m->pic_w * _y4m->pic_h + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
      /*Natively supported: no conversion required.*/
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
      _y4m->convert = y4m_convert_null;
  } else if (strcmp(_y4m->chroma_type, "422p10") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
    _y4m->vpx_fmt = VPX_IMG_FMT_I42216;
    _y4m->bit_depth = 10;
    _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
    _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
    _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h +
                                 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h);
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
      fprintf(stderr, "Unsupported conversion from 422p10 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "422p12") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
    _y4m->vpx_fmt = VPX_IMG_FMT_I42216;
    _y4m->bit_depth = 12;
    _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
    _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
    _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h +
                                 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h);
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
      fprintf(stderr, "Unsupported conversion from 422p12 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "411") == 0) {
    _y4m->src_c_dec_h = 4;
    _y4m->dst_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
    _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
      We need to make two filter passes, so we need some extra space in the
      aux buf.*/
    _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 3) / 4) * _y4m->pic_h;
        _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
    _y4m->convert = y4m_convert_411_420jpeg;
  } else if (strcmp(_y4m->chroma_type, "444") == 0) {
    _y4m->src_c_dec_h = 1;
    _y4m->src_c_dec_v = 1;
      _y4m->dst_c_dec_h = 2;
      _y4m->dst_c_dec_v = 2;
      _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
      /*Chroma filter required: read into the aux buf first.
        We need to make two filter passes, so we need some extra space in the
        aux buf.*/
      _y4m->aux_buf_read_sz = 2 * _y4m->pic_w * _y4m->pic_h;
          _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
      _y4m->convert = y4m_convert_444_420jpeg;
      _y4m->vpx_fmt = VPX_IMG_FMT_I444;
      _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
      _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
      _y4m->dst_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
      /*Natively supported: no conversion required.*/
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
      _y4m->convert = y4m_convert_null;
  } else if (strcmp(_y4m->chroma_type, "444p10") == 0) {
    _y4m->src_c_dec_h = 1;
    _y4m->src_c_dec_v = 1;
    _y4m->vpx_fmt = VPX_IMG_FMT_I44416;
    _y4m->bit_depth = 10;
    _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
    _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
    _y4m->dst_buf_read_sz = 2 * 3 * _y4m->pic_w * _y4m->pic_h;
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
      fprintf(stderr, "Unsupported conversion from 444p10 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "444p12") == 0) {
    _y4m->src_c_dec_h = 1;
    _y4m->src_c_dec_v = 1;
    _y4m->vpx_fmt = VPX_IMG_FMT_I44416;
    _y4m->bit_depth = 12;
    _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
    _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
    _y4m->dst_buf_read_sz = 2 * 3 * _y4m->pic_w * _y4m->pic_h;
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
      fprintf(stderr, "Unsupported conversion from 444p12 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "444alpha") == 0) {
    _y4m->src_c_dec_h = 1;
    _y4m->src_c_dec_v = 1;
      _y4m->dst_c_dec_h = 2;
      _y4m->dst_c_dec_v = 2;
      _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
      /*Chroma filter required: read into the aux buf first.
        We need to make two filter passes, so we need some extra space in the
        aux buf.
        The extra plane also gets read into the aux buf.
        It will be discarded.*/
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
      _y4m->convert = y4m_convert_444_420jpeg;
      _y4m->vpx_fmt = VPX_IMG_FMT_444A;
      _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
      _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
      _y4m->dst_buf_read_sz = 4 * _y4m->pic_w * _y4m->pic_h;
      /*Natively supported: no conversion required.*/
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
      _y4m->convert = y4m_convert_null;
  } else if (strcmp(_y4m->chroma_type, "mono") == 0) {
    _y4m->src_c_dec_h = _y4m->src_c_dec_v = 0;
    _y4m->dst_c_dec_h = _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*No extra space required, but we need to clear the chroma planes.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_mono_420jpeg;
    fprintf(stderr, "Unknown chroma sampling type: %s\n", _y4m->chroma_type);
  /*The size of the final frame buffers is always computed from the
    destination chroma decimation type.*/
      _y4m->pic_w * _y4m->pic_h +
      2 * ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
          ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
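  /*Worked example (added for illustration): for a 1920x1080 4:2:0 input this
    gives 1920*1080 + 2*960*540 = 3110400 bytes of 8-bit samples; for the
    10- and 12-bit formats the allocation below is doubled to 6220800 bytes.*/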
  if (_y4m->bit_depth == 8)
    _y4m->dst_buf = (unsigned char *)malloc(_y4m->dst_buf_sz);
    _y4m->dst_buf = (unsigned char *)malloc(2 * _y4m->dst_buf_sz);
  if (_y4m->aux_buf_sz > 0)
    _y4m->aux_buf = (unsigned char *)malloc(_y4m->aux_buf_sz);
void y4m_input_close(y4m_input *_y4m) {
  free(_y4m->dst_buf);
  free(_y4m->aux_buf);
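
/*Illustrative sketch (not part of the original file): how a caller such as
  vpxenc might drive this reader.  It assumes the declarations from
  y4minput.h are in scope and that the trailing argument of y4m_input_open()
  (elided above) selects whether the input must be forced to 4:2:0; both
  details are assumptions here rather than guarantees of this fragment.*/
static int example_count_frames(FILE *fin) {
  y4m_input y4m;
  vpx_image_t img;
  int frames = 0;
  /*No header bytes have been pre-read, so the skip buffer is empty.*/
  if (y4m_input_open(&y4m, fin, NULL, 0, 0) < 0) return -1;
  while (y4m_input_fetch_frame(&y4m, fin, &img) > 0) frames++;
  y4m_input_close(&y4m);
  return frames;
}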
int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) {
  int bytes_per_sample = _y4m->bit_depth > 8 ? 2 : 1;
  /*Read and skip the frame header.*/
  if (!file_read(frame, 6, _fin)) return 0;
  if (memcmp(frame, "FRAME", 5)) {
    fprintf(stderr, "Loss of framing in Y4M input data\n");
  if (frame[5] != '\n') {
    for (j = 0; j < 79 && file_read(&c, 1, _fin) && c != '\n'; j++) {
      fprintf(stderr, "Error parsing Y4M frame header\n");
  /*Read the frame data that needs no conversion.*/
  if (!file_read(_y4m->dst_buf, _y4m->dst_buf_read_sz, _fin)) {
    fprintf(stderr, "Error reading Y4M frame data.\n");
  /*Read the frame data that does need conversion.*/
  if (!file_read(_y4m->aux_buf, _y4m->aux_buf_read_sz, _fin)) {
    fprintf(stderr, "Error reading Y4M frame data.\n");
  /*Now convert the just read frame.*/
  (*_y4m->convert)(_y4m, _y4m->dst_buf, _y4m->aux_buf);
  /*Fill in the frame buffer pointers.
    We don't use vpx_img_wrap() because it forces padding for odd picture
    sizes, which would require a separate fread call for every row.*/
  memset(_img, 0, sizeof(*_img));
  /*Y4M has the planes in Y'CbCr order, which libvpx calls Y, U, and V.*/
  _img->fmt = _y4m->vpx_fmt;
  _img->w = _img->d_w = _y4m->pic_w;
  _img->h = _img->d_h = _y4m->pic_h;
  _img->x_chroma_shift = _y4m->dst_c_dec_h >> 1;
  _img->y_chroma_shift = _y4m->dst_c_dec_v >> 1;
  _img->bps = _y4m->bps;
  /*Set up the buffer pointers.*/
  pic_sz = _y4m->pic_w * _y4m->pic_h * bytes_per_sample;
  c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  c_w *= bytes_per_sample;
  c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  _img->stride[VPX_PLANE_Y] = _img->stride[VPX_PLANE_ALPHA] =
      _y4m->pic_w * bytes_per_sample;
  _img->stride[VPX_PLANE_U] = _img->stride[VPX_PLANE_V] = c_w;
  _img->planes[VPX_PLANE_Y] = _y4m->dst_buf;
  _img->planes[VPX_PLANE_U] = _y4m->dst_buf + pic_sz;
  _img->planes[VPX_PLANE_V] = _y4m->dst_buf + pic_sz + c_sz;
  _img->planes[VPX_PLANE_ALPHA] = _y4m->dst_buf + pic_sz + 2 * c_sz;
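  /*Worked example (added for illustration): for a 352x288 8-bit 4:2:0 frame,
    pic_sz = 101376, c_w = 176, c_h = 144 and c_sz = 25344, so the Y, U and V
    planes start at offsets 0, 101376 and 126720 of dst_buf with strides 352,
    176 and 176; the alpha pointer is only meaningful for "444alpha" input.*/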