/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <stdlib.h>
#include <string.h>

#include "vpx/vpx_image.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
18 static vpx_image_t *img_alloc_helper(vpx_image_t *img,
22 unsigned int buf_align,
23 unsigned int stride_align,
24 unsigned char *img_data) {
26 unsigned int h, w, s, xcs, ycs, bps;
29 /* Treat align==0 like align==1 */
33 /* Validate alignment (must be power of 2) */
34 if (buf_align & (buf_align - 1))
37 /* Treat align==0 like align==1 */
41 /* Validate alignment (must be power of 2) */
42 if (stride_align & (stride_align - 1))
45 /* Get sample size for this format */
47 case VPX_IMG_FMT_RGB32:
48 case VPX_IMG_FMT_RGB32_LE:
49 case VPX_IMG_FMT_ARGB:
50 case VPX_IMG_FMT_ARGB_LE:
53 case VPX_IMG_FMT_RGB24:
54 case VPX_IMG_FMT_BGR24:
57 case VPX_IMG_FMT_RGB565:
58 case VPX_IMG_FMT_RGB565_LE:
59 case VPX_IMG_FMT_RGB555:
60 case VPX_IMG_FMT_RGB555_LE:
61 case VPX_IMG_FMT_UYVY:
62 case VPX_IMG_FMT_YUY2:
63 case VPX_IMG_FMT_YVYU:
66 case VPX_IMG_FMT_I420:
67 case VPX_IMG_FMT_YV12:
68 case VPX_IMG_FMT_VPXI420:
69 case VPX_IMG_FMT_VPXYV12:
72 case VPX_IMG_FMT_I422:
75 case VPX_IMG_FMT_I444:
78 case VPX_IMG_FMT_I42016:
81 case VPX_IMG_FMT_I42216:
84 case VPX_IMG_FMT_I44416:
92 /* Get chroma shift values for this format */
94 case VPX_IMG_FMT_I420:
95 case VPX_IMG_FMT_YV12:
96 case VPX_IMG_FMT_VPXI420:
97 case VPX_IMG_FMT_VPXYV12:
98 case VPX_IMG_FMT_I422:
99 case VPX_IMG_FMT_I42016:
100 case VPX_IMG_FMT_I42216:
109 case VPX_IMG_FMT_I420:
110 case VPX_IMG_FMT_YV12:
111 case VPX_IMG_FMT_VPXI420:
112 case VPX_IMG_FMT_VPXYV12:
120 /* Calculate storage sizes given the chroma subsampling */
121 align = (1 << xcs) - 1;
122 w = (d_w + align) & ~align;
123 align = (1 << ycs) - 1;
124 h = (d_h + align) & ~align;
125 s = (fmt & VPX_IMG_FMT_PLANAR) ? w : bps * w / 8;
126 s = (s + stride_align - 1) & ~(stride_align - 1);
128 /* Allocate the new image */
130 img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));
135 img->self_allocd = 1;
137 memset(img, 0, sizeof(vpx_image_t));
140 img->img_data = img_data;
143 const uint64_t alloc_size = (fmt & VPX_IMG_FMT_PLANAR) ?
144 (uint64_t)h * s * bps / 8 : (uint64_t)h * s;
146 if (alloc_size != (size_t)alloc_size)
149 img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size);
150 img->img_data_owner = 1;
157 img->bit_depth = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 16 : 8;
160 img->x_chroma_shift = xcs;
161 img->y_chroma_shift = ycs;
164 /* Calculate strides */
165 img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = s;
166 img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = s >> xcs;
168 /* Default viewport to entire image */
169 if (!vpx_img_set_rect(img, 0, 0, d_w, d_h))
177 vpx_image_t *vpx_img_alloc(vpx_image_t *img,
181 unsigned int align) {
182 return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
185 vpx_image_t *vpx_img_wrap(vpx_image_t *img,
189 unsigned int stride_align,
190 unsigned char *img_data) {
191 /* By setting buf_align = 1, we don't change buffer alignment in this
193 return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
196 int vpx_img_set_rect(vpx_image_t *img,
203 if (x + w <= img->w && y + h <= img->h) {
207 /* Calculate plane pointers */
208 if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
209 img->planes[VPX_PLANE_PACKED] =
210 img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
212 data = img->img_data;
214 if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
215 img->planes[VPX_PLANE_ALPHA] =
216 data + x + y * img->stride[VPX_PLANE_ALPHA];
217 data += img->h * img->stride[VPX_PLANE_ALPHA];
220 img->planes[VPX_PLANE_Y] = data + x + y * img->stride[VPX_PLANE_Y];
221 data += img->h * img->stride[VPX_PLANE_Y];
223 if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
224 img->planes[VPX_PLANE_U] = data
225 + (x >> img->x_chroma_shift)
226 + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
227 data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
228 img->planes[VPX_PLANE_V] = data
229 + (x >> img->x_chroma_shift)
230 + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
232 img->planes[VPX_PLANE_V] = data
233 + (x >> img->x_chroma_shift)
234 + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
235 data += (img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
236 img->planes[VPX_PLANE_U] = data
237 + (x >> img->x_chroma_shift)
238 + (y >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
248 void vpx_img_flip(vpx_image_t *img) {
249 /* Note: In the calculation pointer adjustment calculation, we want the
250 * rhs to be promoted to a signed type. Section 6.3.1.8 of the ISO C99
251 * standard indicates that if the adjustment parameter is unsigned, the
252 * stride parameter will be promoted to unsigned, causing errors when
253 * the lhs is a larger type than the rhs.
255 img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
256 img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];
258 img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
259 * img->stride[VPX_PLANE_U];
260 img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];
262 img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1)
263 * img->stride[VPX_PLANE_V];
264 img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];
266 img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
267 img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
270 void vpx_img_free(vpx_image_t *img) {
272 if (img->img_data && img->img_data_owner)
273 vpx_free(img->img_data);
275 if (img->self_allocd)