Line data Source code
1 : /*
2 : * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 : *
4 : * Use of this source code is governed by a BSD-style license
5 : * that can be found in the LICENSE file in the root of the source
6 : * tree. An additional intellectual property rights grant can be found
7 : * in the file PATENTS. All contributing project authors may
8 : * be found in the AUTHORS file in the root of the source tree.
9 : */
10 :
11 : #include "treereader.h"
12 : #include "vp8/common/entropymv.h"
13 : #include "vp8/common/entropymode.h"
14 : #include "onyxd_int.h"
15 : #include "vp8/common/findnearmv.h"
16 :
17 0 : static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p) {
18 0 : const int i = vp8_treed_read(bc, vp8_bmode_tree, p);
19 :
20 0 : return (B_PREDICTION_MODE)i;
21 : }
22 :
23 0 : static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p) {
24 0 : const int i = vp8_treed_read(bc, vp8_ymode_tree, p);
25 :
26 0 : return (MB_PREDICTION_MODE)i;
27 : }
28 :
29 0 : static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p) {
30 0 : const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p);
31 :
32 0 : return (MB_PREDICTION_MODE)i;
33 : }
34 :
35 0 : static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p) {
36 0 : const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p);
37 :
38 0 : return (MB_PREDICTION_MODE)i;
39 : }
40 :
41 0 : static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi) {
42 0 : vp8_reader *const bc = &pbi->mbc[8];
43 0 : const int mis = pbi->common.mode_info_stride;
44 :
45 0 : mi->mbmi.ref_frame = INTRA_FRAME;
46 0 : mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob);
47 :
48 0 : if (mi->mbmi.mode == B_PRED) {
49 0 : int i = 0;
50 0 : mi->mbmi.is_4x4 = 1;
51 :
52 : do {
53 0 : const B_PREDICTION_MODE A = above_block_mode(mi, i, mis);
54 0 : const B_PREDICTION_MODE L = left_block_mode(mi, i);
55 :
56 0 : mi->bmi[i].as_mode = read_bmode(bc, vp8_kf_bmode_prob[A][L]);
57 0 : } while (++i < 16);
58 : }
59 :
60 0 : mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob);
61 0 : }
62 :
/* Decode one signed motion-vector component (row or col) using the
 * probability set in mvc.  Small magnitudes are coded with a tree;
 * large magnitudes are coded bit-by-bit with bit 3 sometimes implicit. */
static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
  const vp8_prob *const p = (const vp8_prob *)mvc;
  int x = 0;

  if (vp8_read(r, p[mvpis_short])) /* Large */
  {
    int i = 0;

    /* Low three magnitude bits, least-significant first. */
    do {
      x += vp8_read(r, p[MVPbits + i]) << i;
    } while (++i < 3);

    i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */

    /* High magnitude bits, most-significant first, down to (but not
     * including) bit 3. */
    do {
      x += vp8_read(r, p[MVPbits + i]) << i;
    } while (--i > 3);

    /* Bit 3 is implicitly set when any higher bit is set only if the
     * coded bit says so; when no higher bit is set it is forced on. */
    if (!(x & 0xFFF0) || vp8_read(r, p[MVPbits + 3])) x += 8;
  } else { /* small */
    x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
  }

  /* A sign bit is only present for non-zero magnitudes. */
  if (x && vp8_read(r, p[MVPsign])) x = -x;

  return x;
}
90 :
91 0 : static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) {
92 0 : mv->row = (short)(read_mvcomponent(r, mvc) * 2);
93 0 : mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
94 0 : }
95 :
96 0 : static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
97 0 : int i = 0;
98 :
99 : do {
100 0 : const vp8_prob *up = vp8_mv_update_probs[i].prob;
101 0 : vp8_prob *p = (vp8_prob *)(mvc + i);
102 0 : vp8_prob *const pstop = p + MVPcount;
103 :
104 : do {
105 0 : if (vp8_read(bc, *up++)) {
106 0 : const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);
107 :
108 0 : *p = x ? x << 1 : 1;
109 : }
110 0 : } while (++p < pstop);
111 0 : } while (++i < 2);
112 0 : }
113 :
/* Number of 4x4 blocks covered by one partition for each split type
 * (indexed by split configuration s: two 16x8, two 8x16, four 8x8,
 * sixteen 4x4 partitions respectively). */
static const unsigned char mbsplit_fill_count[4] = { 8, 8, 4, 1 };
/* For each split type, the 4x4 block indices listed partition by
 * partition, so a partition's MV can be copied to every block it
 * covers (see the fill loop in decode_split_mv). */
static const unsigned char mbsplit_fill_offset[4][16] = {
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
  { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
};
121 :
122 0 : static void mb_mode_mv_init(VP8D_COMP *pbi) {
123 0 : vp8_reader *const bc = &pbi->mbc[8];
124 0 : MV_CONTEXT *const mvc = pbi->common.fc.mvc;
125 :
126 : #if CONFIG_ERROR_CONCEALMENT
127 : /* Default is that no macroblock is corrupt, therefore we initialize
128 : * mvs_corrupt_from_mb to something very big, which we can be sure is
129 : * outside the frame. */
130 : pbi->mvs_corrupt_from_mb = UINT_MAX;
131 : #endif
132 : /* Read the mb_no_coeff_skip flag */
133 0 : pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc);
134 :
135 0 : pbi->prob_skip_false = 0;
136 0 : if (pbi->common.mb_no_coeff_skip) {
137 0 : pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
138 : }
139 :
140 0 : if (pbi->common.frame_type != KEY_FRAME) {
141 0 : pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
142 0 : pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8);
143 0 : pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8);
144 :
145 0 : if (vp8_read_bit(bc)) {
146 0 : int i = 0;
147 :
148 : do {
149 0 : pbi->common.fc.ymode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
150 0 : } while (++i < 4);
151 : }
152 :
153 0 : if (vp8_read_bit(bc)) {
154 0 : int i = 0;
155 :
156 : do {
157 0 : pbi->common.fc.uv_mode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8);
158 0 : } while (++i < 3);
159 : }
160 :
161 0 : read_mvcontexts(bc, mvc);
162 : }
163 0 : }
164 :
/* Sub-MV reference probabilities, indexed by a 3-bit context built in
 * get_sub_mv_ref_prob: (above==0) << 2 | (left==0) << 1 | (left==above).
 * Some contexts alias the same probability set, hence the repeats. */
const vp8_prob vp8_sub_mv_ref_prob3[8][VP8_SUBMVREFS - 1] = {
  { 147, 136, 18 }, /* SUBMVREF_NORMAL */
  { 223, 1, 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
  { 106, 145, 1 },  /* SUBMVREF_LEFT_ZED */
  { 208, 1, 1 },    /* SUBMVREF_LEFT_ABOVE_ZED */
  { 179, 121, 1 },  /* SUBMVREF_ABOVE_ZED */
  { 223, 1, 34 },   /* SUBMVREF_LEFT_ABOVE_SAME */
  { 179, 121, 1 },  /* SUBMVREF_ABOVE_ZED */
  { 208, 1, 1 }     /* SUBMVREF_LEFT_ABOVE_ZED */
};
175 :
176 0 : static const vp8_prob *get_sub_mv_ref_prob(const int left, const int above) {
177 0 : int lez = (left == 0);
178 0 : int aez = (above == 0);
179 0 : int lea = (left == above);
180 : const vp8_prob *prob;
181 :
182 0 : prob = vp8_sub_mv_ref_prob3[(aez << 2) | (lez << 1) | (lea)];
183 :
184 0 : return prob;
185 : }
186 :
/* Decode the sub-block motion vectors of a SPLITMV macroblock.
 * First the split configuration is read, then one MV per partition:
 * either a copy of the left/above neighbour MV, zero, or a new MV coded
 * relative to best_mv.  Each partition's MV is immediately propagated
 * to every 4x4 block it covers, because later partitions reference
 * earlier ones as "left"/"above".  Sets mbmi->partitioning and may set
 * mbmi->need_to_clamp_mvs. */
static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
                            const MODE_INFO *left_mb, const MODE_INFO *above_mb,
                            MB_MODE_INFO *mbmi, int_mv best_mv,
                            MV_CONTEXT *const mvc, int mb_to_left_edge,
                            int mb_to_right_edge, int mb_to_top_edge,
                            int mb_to_bottom_edge) {
  int s; /* split configuration (16x8, 8x16, 8x8, 4x4) */
  /* number of partitions in the split configuration (see vp8_mbsplit_count) */
  int num_p;
  int j = 0;

  /* Read the split configuration with hard-coded tree probabilities
   * (110/111/150 — presumably the flattened vp8_mbsplit_probs; confirm
   * against the encoder side). Default is 4x4 with 16 partitions. */
  s = 3;
  num_p = 16;
  if (vp8_read(bc, 110)) {
    s = 2;
    num_p = 4;
    if (vp8_read(bc, 111)) {
      s = vp8_read(bc, 150);
      num_p = 2;
    }
  }

  do /* for each subset j */
  {
    int_mv leftmv, abovemv;
    int_mv blockmv;
    int k; /* first block in subset j */

    const vp8_prob *prob;
    k = vp8_mbsplit_offset[s][j];

    /* Left predictor: from the neighbouring MB when k is on the left
     * column of this MB, otherwise from the block to the left in mi. */
    if (!(k & 3)) {
      /* On L edge, get from MB to left of us */
      if (left_mb->mbmi.mode != SPLITMV) {
        leftmv.as_int = left_mb->mbmi.mv.as_int;
      } else {
        leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int;
      }
    } else {
      leftmv.as_int = (mi->bmi + k - 1)->mv.as_int;
    }

    /* Above predictor: analogous, using the MB above for the top row. */
    if (!(k >> 2)) {
      /* On top edge, get from MB above us */
      if (above_mb->mbmi.mode != SPLITMV) {
        abovemv.as_int = above_mb->mbmi.mv.as_int;
      } else {
        abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int;
      }
    } else {
      abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;
    }

    prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int);

    /* Sub-MV reference tree: LEFT / ABOVE / ZERO / NEW. */
    if (vp8_read(bc, prob[0])) {
      if (vp8_read(bc, prob[1])) {
        blockmv.as_int = 0;
        if (vp8_read(bc, prob[2])) {
          /* NEW4X4: decode a residual MV and add the best reference MV. */
          blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
          blockmv.as_mv.row += best_mv.as_mv.row;
          blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
          blockmv.as_mv.col += best_mv.as_mv.col;
        }
      } else {
        blockmv.as_int = abovemv.as_int;
      }
    } else {
      blockmv.as_int = leftmv.as_int;
    }

    mbmi->need_to_clamp_mvs |=
        vp8_check_mv_bounds(&blockmv, mb_to_left_edge, mb_to_right_edge,
                            mb_to_top_edge, mb_to_bottom_edge);

    {
      /* Fill (uniform) modes, mvs of jth subset.
         Must do it here because ensuing subsets can
         refer back to us via "left" or "above". */
      const unsigned char *fill_offset;
      unsigned int fill_count = mbsplit_fill_count[s];

      fill_offset =
          &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];

      do {
        mi->bmi[*fill_offset].mv.as_int = blockmv.as_int;
        fill_offset++;
      } while (--fill_count);
    }

  } while (++j < num_p);

  mbmi->partitioning = s;
}
282 :
/* Decode the prediction mode and motion vector(s) for one macroblock on
 * an inter frame.  For inter MBs this surveys the above, left and
 * above-left neighbours to build nearest/near MV candidates and vote
 * counts, then walks the inter-mode tree (ZEROMV / NEARESTMV / NEARMV /
 * NEWMV / SPLITMV).  For intra MBs it decodes luma (and possibly 16
 * sub-block) and chroma modes. */
static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi,
                             MB_MODE_INFO *mbmi) {
  vp8_reader *const bc = &pbi->mbc[8];
  mbmi->ref_frame = (MV_REFERENCE_FRAME)vp8_read(bc, pbi->prob_intra);
  if (mbmi->ref_frame) /* inter MB */
  {
    enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    int cnt[4];
    /* cntx walks cnt[] and nmv walks near_mvs[] in lock-step: each is
     * advanced when a new distinct neighbour MV is appended. */
    int *cntx = cnt;
    int_mv near_mvs[4];
    int_mv *nmv = near_mvs;
    const int mis = pbi->mb.mode_info_stride;
    const MODE_INFO *above = mi - mis;
    const MODE_INFO *left = mi - 1;
    const MODE_INFO *aboveleft = above - 1;
    int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias;

    mbmi->need_to_clamp_mvs = 0;

    /* LAST / GOLDEN / ALTREF selection. */
    if (vp8_read(bc, pbi->prob_last)) {
      mbmi->ref_frame =
          (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf)));
    }

    /* Zero accumulators */
    nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0;
    cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;

    /* Process above */
    if (above->mbmi.ref_frame != INTRA_FRAME) {
      if (above->mbmi.mv.as_int) {
        (++nmv)->as_int = above->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], mbmi->ref_frame,
                nmv, ref_frame_sign_bias);
        ++cntx;
      }

      *cntx += 2;
    }

    /* Process left */
    if (left->mbmi.ref_frame != INTRA_FRAME) {
      if (left->mbmi.mv.as_int) {
        int_mv this_mv;

        this_mv.as_int = left->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], mbmi->ref_frame,
                &this_mv, ref_frame_sign_bias);

        /* Only append if distinct from the most recent candidate. */
        if (this_mv.as_int != nmv->as_int) {
          (++nmv)->as_int = this_mv.as_int;
          ++cntx;
        }

        *cntx += 2;
      } else {
        cnt[CNT_INTRA] += 2;
      }
    }

    /* Process above left */
    if (aboveleft->mbmi.ref_frame != INTRA_FRAME) {
      if (aboveleft->mbmi.mv.as_int) {
        int_mv this_mv;

        this_mv.as_int = aboveleft->mbmi.mv.as_int;
        mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], mbmi->ref_frame,
                &this_mv, ref_frame_sign_bias);

        if (this_mv.as_int != nmv->as_int) {
          (++nmv)->as_int = this_mv.as_int;
          ++cntx;
        }

        /* Above-left carries half the weight of above/left. */
        *cntx += 1;
      } else {
        cnt[CNT_INTRA] += 1;
      }
    }

    /* not-ZEROMV branch of the mode tree. */
    if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_INTRA]][0])) {
      /* If we have three distinct MV's ... */
      /* See if above-left MV can be merged with NEAREST */
      cnt[CNT_NEAREST] += ((cnt[CNT_SPLITMV] > 0) &
                           (nmv->as_int == near_mvs[CNT_NEAREST].as_int));

      /* Swap near and nearest if necessary */
      if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
        int tmp;
        tmp = cnt[CNT_NEAREST];
        cnt[CNT_NEAREST] = cnt[CNT_NEAR];
        cnt[CNT_NEAR] = tmp;
        tmp = near_mvs[CNT_NEAREST].as_int;
        near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
        near_mvs[CNT_NEAR].as_int = tmp;
      }

      /* not-NEARESTMV branch. */
      if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
        /* not-NEARMV branch: NEWMV or SPLITMV. */
        if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
          int mb_to_top_edge;
          int mb_to_bottom_edge;
          int mb_to_left_edge;
          int mb_to_right_edge;
          MV_CONTEXT *const mvc = pbi->common.fc.mvc;
          int near_index;

          /* Widen the clamping bounds by the allowed prediction
           * margins. */
          mb_to_top_edge = pbi->mb.mb_to_top_edge;
          mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge;
          mb_to_top_edge -= LEFT_TOP_MARGIN;
          mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
          mb_to_right_edge = pbi->mb.mb_to_right_edge;
          mb_to_right_edge += RIGHT_BOTTOM_MARGIN;
          mb_to_left_edge = pbi->mb.mb_to_left_edge;
          mb_to_left_edge -= LEFT_TOP_MARGIN;

          /* Use near_mvs[0] to store the "best" MV */
          near_index = CNT_INTRA + (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]);

          vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb);

          /* Reuse cnt[CNT_SPLITMV] as the SPLITMV context: how many
           * neighbours used SPLITMV (above/left weighted double). */
          cnt[CNT_SPLITMV] =
              ((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) *
                  2 +
              (aboveleft->mbmi.mode == SPLITMV);

          if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
            decode_split_mv(bc, mi, left, above, mbmi, near_mvs[near_index],
                            mvc, mb_to_left_edge, mb_to_right_edge,
                            mb_to_top_edge, mb_to_bottom_edge);
            mbmi->mv.as_int = mi->bmi[15].mv.as_int;
            mbmi->mode = SPLITMV;
            mbmi->is_4x4 = 1;
          } else {
            int_mv *const mbmi_mv = &mbmi->mv;
            read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *)mvc);
            mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row;
            mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col;

            /* Don't need to check this on NEARMV and NEARESTMV
             * modes since those modes clamp the MV. The NEWMV mode
             * does not, so signal to the prediction stage whether
             * special handling may be required.
             */
            mbmi->need_to_clamp_mvs =
                vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge, mb_to_right_edge,
                                    mb_to_top_edge, mb_to_bottom_edge);
            mbmi->mode = NEWMV;
          }
        } else {
          mbmi->mode = NEARMV;
          mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int;
          vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
        }
      } else {
        mbmi->mode = NEARESTMV;
        mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int;
        vp8_clamp_mv2(&mbmi->mv, &pbi->mb);
      }
    } else {
      mbmi->mode = ZEROMV;
      mbmi->mv.as_int = 0;
    }

#if CONFIG_ERROR_CONCEALMENT
    /* Replicate the MB-level MV into all 16 sub-blocks so concealment
     * can treat every MB uniformly. */
    if (pbi->ec_enabled && (mbmi->mode != SPLITMV)) {
      mi->bmi[0].mv.as_int = mi->bmi[1].mv.as_int = mi->bmi[2].mv.as_int =
          mi->bmi[3].mv.as_int = mi->bmi[4].mv.as_int = mi->bmi[5].mv.as_int =
              mi->bmi[6].mv.as_int = mi->bmi[7].mv.as_int =
                  mi->bmi[8].mv.as_int = mi->bmi[9].mv.as_int =
                      mi->bmi[10].mv.as_int = mi->bmi[11].mv.as_int =
                          mi->bmi[12].mv.as_int = mi->bmi[13].mv.as_int =
                              mi->bmi[14].mv.as_int = mi->bmi[15].mv.as_int =
                                  mbmi->mv.as_int;
    }
#endif
  } else {
    /* required for left and above block mv */
    mbmi->mv.as_int = 0;

    /* MB is intra coded */
    if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED) {
      int j = 0;
      mbmi->is_4x4 = 1;
      do {
        mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob);
      } while (++j < 16);
    }

    mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob);
  }
}
474 :
475 0 : static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) {
476 : /* Is segmentation enabled */
477 0 : if (x->segmentation_enabled && x->update_mb_segmentation_map) {
478 : /* If so then read the segment id. */
479 0 : if (vp8_read(r, x->mb_segment_tree_probs[0])) {
480 0 : mi->segment_id =
481 0 : (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
482 : } else {
483 0 : mi->segment_id =
484 0 : (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
485 : }
486 : }
487 0 : }
488 :
489 0 : static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi,
490 : MB_MODE_INFO *mbmi) {
491 : (void)mbmi;
492 :
493 : /* Read the Macroblock segmentation map if it is being updated explicitly
494 : * this frame (reset to 0 above by default)
495 : * By default on a key frame reset all MBs to segment 0
496 : */
497 0 : if (pbi->mb.update_mb_segmentation_map) {
498 0 : read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb);
499 0 : } else if (pbi->common.frame_type == KEY_FRAME) {
500 0 : mi->mbmi.segment_id = 0;
501 : }
502 :
503 : /* Read the macroblock coeff skip flag if this feature is in use,
504 : * else default to 0 */
505 0 : if (pbi->common.mb_no_coeff_skip) {
506 0 : mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false);
507 : } else {
508 0 : mi->mbmi.mb_skip_coeff = 0;
509 : }
510 :
511 0 : mi->mbmi.is_4x4 = 0;
512 0 : if (pbi->common.frame_type == KEY_FRAME) {
513 0 : read_kf_modes(pbi, mi);
514 : } else {
515 0 : read_mb_modes_mv(pbi, mi, &mi->mbmi);
516 : }
517 0 : }
518 :
519 0 : void vp8_decode_mode_mvs(VP8D_COMP *pbi) {
520 0 : MODE_INFO *mi = pbi->common.mi;
521 0 : int mb_row = -1;
522 : int mb_to_right_edge_start;
523 :
524 0 : mb_mode_mv_init(pbi);
525 :
526 0 : pbi->mb.mb_to_top_edge = 0;
527 0 : pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3;
528 0 : mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3;
529 :
530 0 : while (++mb_row < pbi->common.mb_rows) {
531 0 : int mb_col = -1;
532 :
533 0 : pbi->mb.mb_to_left_edge = 0;
534 0 : pbi->mb.mb_to_right_edge = mb_to_right_edge_start;
535 :
536 0 : while (++mb_col < pbi->common.mb_cols) {
537 : #if CONFIG_ERROR_CONCEALMENT
538 : int mb_num = mb_row * pbi->common.mb_cols + mb_col;
539 : #endif
540 :
541 0 : decode_mb_mode_mvs(pbi, mi, &mi->mbmi);
542 :
543 : #if CONFIG_ERROR_CONCEALMENT
544 : /* look for corruption. set mvs_corrupt_from_mb to the current
545 : * mb_num if the frame is corrupt from this macroblock. */
546 : if (vp8dx_bool_error(&pbi->mbc[8]) &&
547 : mb_num < (int)pbi->mvs_corrupt_from_mb) {
548 : pbi->mvs_corrupt_from_mb = mb_num;
549 : /* no need to continue since the partition is corrupt from
550 : * here on.
551 : */
552 : return;
553 : }
554 : #endif
555 :
556 0 : pbi->mb.mb_to_left_edge -= (16 << 3);
557 0 : pbi->mb.mb_to_right_edge -= (16 << 3);
558 0 : mi++; /* next macroblock */
559 : }
560 0 : pbi->mb.mb_to_top_edge -= (16 << 3);
561 0 : pbi->mb.mb_to_bottom_edge -= (16 << 3);
562 :
563 0 : mi++; /* skip left predictor each row */
564 : }
565 0 : }
|