/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <stdlib.h>
#include <string.h>

#include "vpx/vpx_image.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
18 static vpx_image_t *img_alloc_helper(vpx_image_t *img,
22 unsigned int buf_align,
23 unsigned int stride_align,
24 unsigned char *img_data) {
26 unsigned int h, w, s, xcs, ycs, bps;
29 /* Treat align==0 like align==1 */
33 /* Validate alignment (must be power of 2) */
34 if (buf_align & (buf_align - 1))
37 /* Treat align==0 like align==1 */
41 /* Validate alignment (must be power of 2) */
42 if (stride_align & (stride_align - 1))
45 /* Get sample size for this format */
47 case VPX_IMG_FMT_RGB32:
48 case VPX_IMG_FMT_RGB32_LE:
49 case VPX_IMG_FMT_ARGB:
50 case VPX_IMG_FMT_ARGB_LE:
53 case VPX_IMG_FMT_RGB24:
54 case VPX_IMG_FMT_BGR24:
57 case VPX_IMG_FMT_RGB565:
58 case VPX_IMG_FMT_RGB565_LE:
59 case VPX_IMG_FMT_RGB555:
60 case VPX_IMG_FMT_RGB555_LE:
61 case VPX_IMG_FMT_UYVY:
62 case VPX_IMG_FMT_YUY2:
63 case VPX_IMG_FMT_YVYU:
66 case VPX_IMG_FMT_I420:
67 case VPX_IMG_FMT_YV12:
68 case VPX_IMG_FMT_VPXI420:
69 case VPX_IMG_FMT_VPXYV12:
72 case VPX_IMG_FMT_I422:
75 case VPX_IMG_FMT_I444:
78 case VPX_IMG_FMT_I42016:
81 case VPX_IMG_FMT_I42216:
84 case VPX_IMG_FMT_I44416:
92 /* Get chroma shift values for this format */
94 case VPX_IMG_FMT_I420:
95 case VPX_IMG_FMT_YV12:
96 case VPX_IMG_FMT_VPXI420:
97 case VPX_IMG_FMT_VPXYV12:
98 case VPX_IMG_FMT_I422:
99 case VPX_IMG_FMT_I42016:
100 case VPX_IMG_FMT_I42216:
109 case VPX_IMG_FMT_I420:
110 case VPX_IMG_FMT_YV12:
111 case VPX_IMG_FMT_VPXI420:
112 case VPX_IMG_FMT_VPXYV12:
113 case VPX_IMG_FMT_I42016:
121 /* Calculate storage sizes given the chroma subsampling */
122 align = (1 << xcs) - 1;
123 w = (d_w + align) & ~align;
124 align = (1 << ycs) - 1;
125 h = (d_h + align) & ~align;
126 s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
127 s = (s + stride_align - 1) & ~(stride_align - 1);
129 /* Allocate the new image */
131 img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));
136 img->self_allocd = 1;
138 memset(img, 0, sizeof(vpx_image_t));
141 img->img_data = img_data;
144 const uint64_t alloc_size = (fmt & VPX_IMG_FMT_PLANAR) ?
145 (uint64_t)h * s * bps / 8 : (uint64_t)h * s;
147 if (alloc_size != (size_t)alloc_size)
150 img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size);
151 img->img_data_owner = 1;
158 img->bit_depth = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 16 : 8;
161 img->x_chroma_shift = xcs;
162 img->y_chroma_shift = ycs;
165 /* Calculate strides */
166 img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = s;
167 img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = s >> xcs;
169 /* Default viewport to entire image */
170 if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
178 vpx_image_t *vpx_img_alloc(vpx_image_t *img,
182 unsigned int align) {
183 return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
186 vpx_image_t *vpx_img_wrap(vpx_image_t *img,
190 unsigned int stride_align,
191 unsigned char *img_data) {
192 /* By setting buf_align = 1, we don't change buffer alignment in this
194 return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
197 int vpx_img_set_rect(vpx_image_t *img,
204 if (x + w <= img->w && y + h <= img->h) {
208 /* Calculate plane pointers */
209 if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
210 img->planes[VPX_PLANE_PACKED] =
211 img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
213 const int bytes_per_sample =
214 (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
215 data = img->img_data;
217 if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
218 img->planes[VPX_PLANE_ALPHA] =
219 data + x * bytes_per_sample + y * img->stride[VPX_PLANE_ALPHA];
220 data += img->h * img->stride[VPX_PLANE_ALPHA];
223 img->planes[VPX_PLANE_Y] = data + x * bytes_per_sample +
224 y * img->stride[VPX_PLANE_Y];
225 data += img->h * img->stride[VPX_PLANE_Y];
227 if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
228 img->planes[VPX_PLANE_U] =
229 data + (x >> img->x_chroma_shift) * bytes_per_sample +
230 (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
231 data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
232 img->planes[VPX_PLANE_V] =
233 data + (x >> img->x_chroma_shift) * bytes_per_sample +
234 (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
236 img->planes[VPX_PLANE_V] =
237 data + (x >> img->x_chroma_shift) * bytes_per_sample +
238 (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
239 data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
240 img->planes[VPX_PLANE_U] =
241 data + (x >> img->x_chroma_shift) * bytes_per_sample +
242 (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
250 void vpx_img_flip(vpx_image_t *img) {
251 /* Note: In the calculation pointer adjustment calculation, we want the
252 * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
253 * standard indicates that if the adjustment parameter is unsigned, the
254 * stride parameter will be promoted to unsigned, causing errors when
255 * the lhs is a larger type than the rhs.
257 img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
258 img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];
260 img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
261 * img->stride[VPX_PLANE_U];
262 img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];
264 img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
265 * img->stride[VPX_PLANE_V];
266 img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];
268 img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
269 img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
272 void vpx_img_free(vpx_image_t *img) {
274 if (img->img_data && img->img_data_owner)
275 vpx_free(img->img_data);
277 if (img->self_allocd)