Line data Source code
1 : /*
2 : * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
3 : *
4 : * Use of this source code is governed by a BSD-style license
5 : * that can be found in the LICENSE file in the root of the source
6 : * tree. An additional intellectual property rights grant can be found
7 : * in the file PATENTS. All contributing project authors may
8 : * be found in the AUTHORS file in the root of the source tree.
9 : */
10 :
/* MFQE: Multiframe Quality Enhancement
 * In rate limited situations keyframes may cause significant visual artifacts
 * commonly referred to as "popping." This file implements a postprocessing
 * algorithm which blends data from the preceding frame when there is no
 * motion and the q from the previous frame is lower which indicates that it is
 * higher quality.
 */
18 :
19 : #include "./vp8_rtcd.h"
20 : #include "./vpx_dsp_rtcd.h"
21 : #include "vp8/common/postproc.h"
22 : #include "vpx_dsp/variance.h"
23 : #include "vpx_mem/vpx_mem.h"
24 : #include "vpx_scale/yv12config.h"
25 :
26 : #include <limits.h>
27 : #include <stdlib.h>
28 :
29 0 : static void filter_by_weight(unsigned char *src, int src_stride,
30 : unsigned char *dst, int dst_stride, int block_size,
31 : int src_weight) {
32 0 : int dst_weight = (1 << MFQE_PRECISION) - src_weight;
33 0 : int rounding_bit = 1 << (MFQE_PRECISION - 1);
34 : int r, c;
35 :
36 0 : for (r = 0; r < block_size; ++r) {
37 0 : for (c = 0; c < block_size; ++c) {
38 0 : dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >>
39 : MFQE_PRECISION;
40 : }
41 0 : src += src_stride;
42 0 : dst += dst_stride;
43 : }
44 0 : }
45 :
/* C reference implementation: weighted blend of a 16x16 block (see
 * filter_by_weight for the weighting scheme). */
void vp8_filter_by_weight16x16_c(unsigned char *src, int src_stride,
                                 unsigned char *dst, int dst_stride,
                                 int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight);
}
51 :
/* C reference implementation: weighted blend of an 8x8 block (see
 * filter_by_weight for the weighting scheme). */
void vp8_filter_by_weight8x8_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight);
}
57 :
/* C reference implementation: weighted blend of a 4x4 block (see
 * filter_by_weight for the weighting scheme). */
void vp8_filter_by_weight4x4_c(unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride,
                               int src_weight) {
  filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight);
}
63 :
/* Blend the luma plane and both half-size chroma planes of one unit:
 * a whole macroblock when block_size == 16 (16x16 Y, 8x8 U/V) or one
 * quadrant when block_size == 8 (8x8 Y, 4x4 U/V).  src_weight is the
 * weight given to the *_src planes, as in filter_by_weight. */
static void apply_ifactor(unsigned char *y_src, int y_src_stride,
                          unsigned char *y_dst, int y_dst_stride,
                          unsigned char *u_src, unsigned char *v_src,
                          int uv_src_stride, unsigned char *u_dst,
                          unsigned char *v_dst, int uv_dst_stride,
                          int block_size, int src_weight) {
  if (block_size == 16) {
    vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride,
                              src_weight);
    vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride,
                            src_weight);
    vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride,
                            src_weight);
    return;
  }

  /* block_size == 8 (only other supported value) */
  vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride, src_weight);
  vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride,
                          src_weight);
  vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, uv_dst_stride,
                          src_weight);
}
87 :
/* Integer square root of x, rounded to the *nearest* integer (the final
 * term adds 1 when x >= guess^2 + guess + 1, i.e. when x is closer to
 * (guess + 1)^2 than to guess^2).
 *
 * Fix: the binary-search refinement squared `guess` in 32-bit arithmetic,
 * so for large x (e.g. x near UINT_MAX, where the first candidate is
 * 65536 and 65536^2 wraps to 0) the overshoot test never fired and the
 * result diverged.  All squares are now computed in 64 bits.  Current
 * callers pass small per-pixel SSD values, so behavior for them is
 * unchanged.
 */
static unsigned int int_sqrt(unsigned int x) {
  unsigned int y = x;
  unsigned int guess;
  int p = 1;
  /* p = floor(log2(x)) + 1, then halved: bit position of the first
   * candidate bit of the root. */
  while (y >>= 1) p++;
  p >>= 1;

  guess = 0;
  while (p >= 0) {
    /* Tentatively set bit p; keep it only if guess^2 still fits in x. */
    guess |= (1u << p);
    if ((unsigned long long)x < (unsigned long long)guess * guess) {
      guess -= (1u << p);
    }
    p--;
  }
  /* choose between guess or guess+1 */
  return guess + ((unsigned long long)guess * guess + guess + 1 <= x);
}
104 :
105 : #define USE_SSD
/* Enhance one unit - a whole macroblock (blksize == 16) or one 8x8
 * quadrant (blksize == 8) - by blending the current frame (y/u/v) into
 * the destination buffer (yd/ud/vd), which per the MFQE scheme holds the
 * co-located data of the previously shown, higher-quality frame.
 *
 * qcurr/qprev: base q index of the current and previous frame.
 * *_stride:    row strides of the respective planes.
 *
 * The blend runs only when the frame-to-frame difference is below a
 * threshold derived from the q delta and local activity, with extra
 * guards against chroma mismatch and against importing high-frequency
 * content.  Otherwise the current frame's pixels are copied through to
 * the destination unchanged.
 */
static void multiframe_quality_enhance_block(
    int blksize, /* Currently only values supported are 16, 8 */
    int qcurr, int qprev, unsigned char *y, unsigned char *u, unsigned char *v,
    int y_stride, int uv_stride, unsigned char *yd, unsigned char *ud,
    unsigned char *vd, int yd_stride, int uvd_stride) {
  static const unsigned char VP8_ZEROS[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                               0, 0, 0, 0, 0, 0, 0, 0 };
  int uvblksize = blksize >> 1; /* 4:2:0 - chroma blocks are half-size */
  int qdiff = qcurr - qprev;

  int i;
  unsigned char *up;
  unsigned char *udp;
  unsigned char *vp;
  unsigned char *vdp;

  unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk;

  if (blksize == 16) {
    /* "Activity" = variance against an all-zero block, rounded to a
     * per-pixel average (>> 8 for the 256 luma pixels). */
    actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
    act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse) + 128) >> 8;
#ifdef USE_SSD
    /* With USE_SSD, sad/usad/vsad actually hold per-pixel SSD values. */
    vpx_variance16x16(y, y_stride, yd, yd_stride, &sse);
    sad = (sse + 128) >> 8;
    vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse);
    usad = (sse + 32) >> 6;
    vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse);
    vsad = (sse + 32) >> 6;
#else
    sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8;
    usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6;
    vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride) + 32) >> 6;
#endif
  } else /* if (blksize == 8) */
  {
    actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
    act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse) + 32) >> 6;
#ifdef USE_SSD
    vpx_variance8x8(y, y_stride, yd, yd_stride, &sse);
    sad = (sse + 32) >> 6;
    vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse);
    usad = (sse + 8) >> 4;
    vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse);
    vsad = (sse + 8) >> 4;
#else
    sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6;
    usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4;
    vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4;
#endif
  }

  /* Destination activity more than 5x the source's suggests the blend
   * would add high-frequency detail - veto it. */
  actrisk = (actd > act * 5);

  /* thr = qdiff/16 + log2(act) + log4(qprev) */
  /* NOTE(review): the first shift loop destructively consumes actd (the
   * destination activity) although the formula above says act; and
   * qdiff >> 4 is a signed shift stored into unsigned thr - presumably
   * callers only invoke MFQE when qcurr >= qprev.  TODO confirm both. */
  thr = (qdiff >> 4);
  while (actd >>= 1) thr++;
  while (qprev >>= 2) thr++;

#ifdef USE_SSD
  thrsq = thr * thr; /* SSD comparisons need the squared threshold */
  if (sad < thrsq &&
      /* additional checks for color mismatch and excessive addition of
       * high-frequencies */
      4 * usad < thrsq && 4 * vsad < thrsq && !actrisk)
#else
  if (sad < thr &&
      /* additional checks for color mismatch and excessive addition of
       * high-frequencies */
      2 * usad < thr && 2 * vsad < thr && !actrisk)
#endif
  {
    int ifactor;
#ifdef USE_SSD
    /* TODO: optimize this later to not need sqr root */
    sad = int_sqrt(sad);
#endif
    /* Weight of the current frame in the blend: grows with sad relative
     * to the threshold, halved per 32 steps of q increase. */
    ifactor = (sad << MFQE_PRECISION) / thr;
    ifactor >>= (qdiff >> 5);

    if (ifactor) {
      apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd,
                    uvd_stride, blksize, ifactor);
    }
  } else /* else implicitly copy from previous frame */
  {
    /* Block too different to blend safely: pass current pixels through. */
    if (blksize == 16) {
      vp8_copy_mem16x16(y, y_stride, yd, yd_stride);
      vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride);
      vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride);
    } else /* if (blksize == 8) */
    {
      vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
      /* No 4x4 copy primitive, so copy the 4x4 chroma blocks row by row. */
      for (up = u, udp = ud, i = 0; i < uvblksize;
           ++i, up += uv_stride, udp += uvd_stride) {
        memcpy(udp, up, uvblksize);
      }
      for (vp = v, vdp = vd, i = 0; i < uvblksize;
           ++i, vp += uv_stride, vdp += uvd_stride) {
        memcpy(vdp, vp, uvblksize);
      }
    }
  }
}
209 :
/* Decide per 8x8 quadrant whether this inter macroblock is still enough
 * for MFQE blending.  map[0..3] receives 1 for each qualifying quadrant
 * and the return value is the number of qualifying quadrants (0..4).
 */
static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) {
  if (mode_info_context->mbmi.mb_skip_coeff) {
    /* Skipped MB: no coefficients coded - treat all quadrants as still. */
    map[0] = map[1] = map[2] = map[3] = 1;
  } else if (mode_info_context->mbmi.mode == SPLITMV) {
    /* ndx[q] lists the four 4x4 sub-block indices inside quadrant q. */
    static int ndx[4][4] = {
      { 0, 1, 4, 5 }, { 2, 3, 6, 7 }, { 8, 9, 12, 13 }, { 10, 11, 14, 15 }
    };
    int i, j;
    for (i = 0; i < 4; ++i) {
      map[i] = 1;
      /* NOTE(review): the guard reads map[j] rather than map[i], and the
       * mv components are compared without abs() (unlike the non-split
       * path below), so large negative motion still qualifies.  Both look
       * suspicious but match long-standing behavior - confirm upstream
       * before changing. */
      for (j = 0; j < 4 && map[j]; ++j) {
        map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 &&
                   mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2);
      }
    }
  } else {
    /* Whole-MB inter mode: all quadrants qualify iff the single motion
     * vector is within +/-2 in each component. */
    map[0] = map[1] = map[2] = map[3] =
        (mode_info_context->mbmi.mode > B_PRED &&
         abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 &&
         abs(mode_info_context->mbmi.mv.as_mv.col) <= 2);
  }
  return (map[0] + map[1] + map[2] + map[3]);
}
233 :
/* Run MFQE over every macroblock of the frame to show, producing the
 * enhanced frame in cm->post_proc_buffer (which, per the MFQE scheme,
 * enters holding the previously shown frame's data).
 *
 * Per macroblock: on inter frames, qualify_inter_mb() marks which 8x8
 * quadrants have low enough motion to blend; key frames qualify all four
 * quadrants; other frame types none.  Fully qualifying MBs are enhanced
 * as one 16x16 unit, partially qualifying MBs quadrant by quadrant, and
 * non-qualifying MBs are copied straight from the frame to show.
 */
void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
  YV12_BUFFER_CONFIG *show = cm->frame_to_show;
  YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer;

  FRAME_TYPE frame_type = cm->frame_type;
  /* Point at base of Mb MODE_INFO list has motion vectors etc */
  const MODE_INFO *mode_info_context = cm->show_frame_mi;
  int mb_row;
  int mb_col;
  int totmap, map[4]; /* per-quadrant qualification flags and their count */
  int qcurr = cm->base_qindex;
  int qprev = cm->postproc_state.last_base_qindex;

  unsigned char *y_ptr, *u_ptr, *v_ptr;
  unsigned char *yd_ptr, *ud_ptr, *vd_ptr;

  /* Set up the buffer pointers */
  y_ptr = show->y_buffer;
  u_ptr = show->u_buffer;
  v_ptr = show->v_buffer;
  yd_ptr = dest->y_buffer;
  ud_ptr = dest->u_buffer;
  vd_ptr = dest->v_buffer;

  /* postprocess each macro block */
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
      /* if motion is high there will likely be no benefit */
      if (frame_type == INTER_FRAME) {
        totmap = qualify_inter_mb(mode_info_context, map);
      } else {
        totmap = (frame_type == KEY_FRAME ? 4 : 0);
      }
      if (totmap) {
        if (totmap < 4) {
          /* Mixed MB: handle each 8x8 quadrant (i = row, j = col)
           * independently - enhance qualifying ones, copy the rest. */
          int i, j;
          for (i = 0; i < 2; ++i) {
            for (j = 0; j < 2; ++j) {
              if (map[i * 2 + j]) {
                /* Quadrant offsets: 8 pixels in luma, 4 in chroma. */
                multiframe_quality_enhance_block(
                    8, qcurr, qprev, y_ptr + 8 * (i * show->y_stride + j),
                    u_ptr + 4 * (i * show->uv_stride + j),
                    v_ptr + 4 * (i * show->uv_stride + j), show->y_stride,
                    show->uv_stride, yd_ptr + 8 * (i * dest->y_stride + j),
                    ud_ptr + 4 * (i * dest->uv_stride + j),
                    vd_ptr + 4 * (i * dest->uv_stride + j), dest->y_stride,
                    dest->uv_stride);
              } else {
                /* copy a 8x8 block */
                int k;
                unsigned char *up = u_ptr + 4 * (i * show->uv_stride + j);
                unsigned char *udp = ud_ptr + 4 * (i * dest->uv_stride + j);
                unsigned char *vp = v_ptr + 4 * (i * show->uv_stride + j);
                unsigned char *vdp = vd_ptr + 4 * (i * dest->uv_stride + j);
                vp8_copy_mem8x8(
                    y_ptr + 8 * (i * show->y_stride + j), show->y_stride,
                    yd_ptr + 8 * (i * dest->y_stride + j), dest->y_stride);
                /* 4x4 chroma blocks copied row by row (no 4x4 primitive). */
                for (k = 0; k < 4; ++k, up += show->uv_stride,
                    udp += dest->uv_stride, vp += show->uv_stride,
                    vdp += dest->uv_stride) {
                  memcpy(udp, up, 4);
                  memcpy(vdp, vp, 4);
                }
              }
            }
          }
        } else /* totmap = 4 */
        {
          /* All four quadrants qualify: enhance the whole 16x16 MB. */
          multiframe_quality_enhance_block(
              16, qcurr, qprev, y_ptr, u_ptr, v_ptr, show->y_stride,
              show->uv_stride, yd_ptr, ud_ptr, vd_ptr, dest->y_stride,
              dest->uv_stride);
        }
      } else {
        /* Nothing qualifies: pass the MB through untouched. */
        vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride);
        vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride);
        vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride);
      }
      /* Advance one MB to the right: 16 luma / 8 chroma pixels. */
      y_ptr += 16;
      u_ptr += 8;
      v_ptr += 8;
      yd_ptr += 16;
      ud_ptr += 8;
      vd_ptr += 8;
      mode_info_context++; /* step to next MB */
    }

    /* Rewind to column 0 and step down one MB row (16 luma / 8 chroma
     * rows). */
    y_ptr += show->y_stride * 16 - 16 * cm->mb_cols;
    u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
    v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols;
    yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols;
    ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;
    vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols;

    mode_info_context++; /* Skip border mb */
  }
}
|