Line data Source code
1 : /*
2 : * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 : *
4 : * Use of this source code is governed by a BSD-style license
5 : * that can be found in the LICENSE file in the root of the source
6 : * tree. An additional intellectual property rights grant can be found
7 : * in the file PATENTS. All contributing project authors may
8 : * be found in the AUTHORS file in the root of the source tree.
9 : */
10 :
11 : #include <assert.h>
12 : #include <stdio.h>
13 : #include <limits.h>
14 :
15 : #include "vpx/vpx_encoder.h"
16 : #include "vpx_dsp/bitwriter_buffer.h"
17 : #include "vpx_dsp/vpx_dsp_common.h"
18 : #include "vpx_mem/vpx_mem.h"
19 : #include "vpx_ports/mem_ops.h"
20 : #include "vpx_ports/system_state.h"
21 :
22 : #include "vp9/common/vp9_entropy.h"
23 : #include "vp9/common/vp9_entropymode.h"
24 : #include "vp9/common/vp9_entropymv.h"
25 : #include "vp9/common/vp9_mvref_common.h"
26 : #include "vp9/common/vp9_pred_common.h"
27 : #include "vp9/common/vp9_seg_common.h"
28 : #include "vp9/common/vp9_tile_common.h"
29 :
30 : #include "vp9/encoder/vp9_cost.h"
31 : #include "vp9/encoder/vp9_bitstream.h"
32 : #include "vp9/encoder/vp9_encodemv.h"
33 : #include "vp9/encoder/vp9_mcomp.h"
34 : #include "vp9/encoder/vp9_segmentation.h"
35 : #include "vp9/encoder/vp9_subexp.h"
36 : #include "vp9/encoder/vp9_tokenize.h"
37 :
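// Each vp9_token is a { value, length } pair giving the codeword bits and
// the number of bits to emit for a symbol; the tables below must match the
// corresponding trees used by the decoder.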
38 : static const struct vp9_token intra_mode_encodings[INTRA_MODES] = {
39 : { 0, 1 }, { 6, 3 }, { 28, 5 }, { 30, 5 }, { 58, 6 },
40 : { 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
41 : };
42 : static const struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
43 : { { 0, 1 }, { 2, 2 }, { 3, 2 } };
44 : static const struct vp9_token partition_encodings[PARTITION_TYPES] = {
45 : { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
46 : };
47 : static const struct vp9_token inter_mode_encodings[INTER_MODES] = {
48 : { 2, 2 }, { 6, 3 }, { 0, 1 }, { 7, 3 }
49 : };
50 :
51 0 : static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode,
52 : const vpx_prob *probs) {
53 0 : vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
54 0 : }
55 :
56 0 : static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode,
57 : const vpx_prob *probs) {
58 0 : assert(is_inter_mode(mode));
59 0 : vp9_write_token(w, vp9_inter_mode_tree, probs,
60 0 : &inter_mode_encodings[INTER_OFFSET(mode)]);
61 0 : }
62 :
63 0 : static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data,
64 : int max) {
65 0 : vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
66 0 : }
67 :
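// Derives per-node binary branch counts from the symbol counts for the
// given tree, then conditionally signals a differential update for each
// node probability (an update is written only when it saves rate).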
68 0 : static void prob_diff_update(const vpx_tree_index *tree,
69 : vpx_prob probs[/*n - 1*/],
70 : const unsigned int counts[/*n - 1*/], int n,
71 : vpx_writer *w) {
72 : int i;
73 : unsigned int branch_ct[32][2];
74 :
75 : // Assuming max number of probabilities <= 32
76 0 : assert(n <= 32);
77 :
78 0 : vp9_tree_probs_from_distribution(tree, branch_ct, counts);
79 0 : for (i = 0; i < n - 1; ++i)
80 0 : vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
81 0 : }
82 :
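// The chosen transform size is coded as up to three binary decisions
// (not-4x4, not-8x8, not-16x16), truncated at the largest transform size
// allowed for the block.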
83 0 : static void write_selected_tx_size(const VP9_COMMON *cm,
84 : const MACROBLOCKD *const xd, vpx_writer *w) {
85 0 : TX_SIZE tx_size = xd->mi[0]->tx_size;
86 0 : BLOCK_SIZE bsize = xd->mi[0]->sb_type;
87 0 : const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
88 0 : const vpx_prob *const tx_probs =
89 0 : get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
90 0 : vpx_write(w, tx_size != TX_4X4, tx_probs[0]);
91 0 : if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
92 0 : vpx_write(w, tx_size != TX_8X8, tx_probs[1]);
93 0 : if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
94 0 : vpx_write(w, tx_size != TX_16X16, tx_probs[2]);
95 : }
96 0 : }
97 :
98 0 : static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *const xd,
99 : int segment_id, const MODE_INFO *mi, vpx_writer *w) {
100 0 : if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
101 0 : return 1;
102 : } else {
103 0 : const int skip = mi->skip;
104 0 : vpx_write(w, skip, vp9_get_skip_prob(cm, xd));
105 0 : return skip;
106 : }
107 : }
108 :
109 0 : static void update_skip_probs(VP9_COMMON *cm, vpx_writer *w,
110 : FRAME_COUNTS *counts) {
111 : int k;
112 :
113 0 : for (k = 0; k < SKIP_CONTEXTS; ++k)
114 0 : vp9_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
115 0 : }
116 :
117 0 : static void update_switchable_interp_probs(VP9_COMMON *cm, vpx_writer *w,
118 : FRAME_COUNTS *counts) {
119 : int j;
120 0 : for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
121 0 : prob_diff_update(vp9_switchable_interp_tree,
122 0 : cm->fc->switchable_interp_prob[j],
123 0 : counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
124 0 : }
125 :
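// Packs coefficient tokens for one block: an EOB/more-data bit, a run of
// zero tokens, then for nonzero tokens the constrained token tree (the
// pivot node probability selects a row of vp9_pareto8_full), followed by
// any category extra bits and a sign bit.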
126 0 : static void pack_mb_tokens(vpx_writer *w, TOKENEXTRA **tp,
127 : const TOKENEXTRA *const stop,
128 : vpx_bit_depth_t bit_depth) {
129 : const TOKENEXTRA *p;
130 0 : const vp9_extra_bit *const extra_bits =
131 : #if CONFIG_VP9_HIGHBITDEPTH
132 : (bit_depth == VPX_BITS_12)
133 : ? vp9_extra_bits_high12
134 : : (bit_depth == VPX_BITS_10) ? vp9_extra_bits_high10 : vp9_extra_bits;
135 : #else
136 : vp9_extra_bits;
137 : (void)bit_depth;
138 : #endif // CONFIG_VP9_HIGHBITDEPTH
139 :
140 0 : for (p = *tp; p < stop && p->token != EOSB_TOKEN; ++p) {
141 0 : if (p->token == EOB_TOKEN) {
142 0 : vpx_write(w, 0, p->context_tree[0]);
143 0 : continue;
144 : }
145 0 : vpx_write(w, 1, p->context_tree[0]);
146 0 : while (p->token == ZERO_TOKEN) {
147 0 : vpx_write(w, 0, p->context_tree[1]);
148 0 : ++p;
149 0 : if (p == stop || p->token == EOSB_TOKEN) {
150 0 : *tp = (TOKENEXTRA *)(uintptr_t)p + (p->token == EOSB_TOKEN);
151 0 : return;
152 : }
153 : }
154 :
155 : {
156 0 : const int t = p->token;
157 0 : const vpx_prob *const context_tree = p->context_tree;
158 0 : assert(t != ZERO_TOKEN);
159 0 : assert(t != EOB_TOKEN);
160 0 : assert(t != EOSB_TOKEN);
161 0 : vpx_write(w, 1, context_tree[1]);
162 0 : if (t == ONE_TOKEN) {
163 0 : vpx_write(w, 0, context_tree[2]);
164 0 : vpx_write_bit(w, p->extra & 1);
165 : } else { // t >= TWO_TOKEN && t < EOB_TOKEN
166 0 : const struct vp9_token *const a = &vp9_coef_encodings[t];
167 0 : const int v = a->value;
168 0 : const int n = a->len;
169 0 : const int e = p->extra;
170 0 : vpx_write(w, 1, context_tree[2]);
171 0 : vp9_write_tree(w, vp9_coef_con_tree,
172 0 : vp9_pareto8_full[context_tree[PIVOT_NODE] - 1], v,
173 : n - UNCONSTRAINED_NODES, 0);
174 0 : if (t >= CATEGORY1_TOKEN) {
175 0 : const vp9_extra_bit *const b = &extra_bits[t];
176 0 : const unsigned char *pb = b->prob;
177 0 : int v = e >> 1;
178 0 : int n = b->len; // number of bits in v, assumed nonzero
179 : do {
180 0 : const int bb = (v >> --n) & 1;
181 0 : vpx_write(w, bb, *pb++);
182 0 : } while (n);
183 : }
184 0 : vpx_write_bit(w, e & 1);
185 : }
186 : }
187 : }
188 0 : *tp = (TOKENEXTRA *)(uintptr_t)p + (p->token == EOSB_TOKEN);
189 : }
190 :
191 0 : static void write_segment_id(vpx_writer *w, const struct segmentation *seg,
192 : int segment_id) {
193 0 : if (seg->enabled && seg->update_map)
194 0 : vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
195 0 : }
196 :
197 : // This function encodes the reference frame
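// For single prediction up to two context-coded bits select among LAST,
// GOLDEN and ALTREF; for compound prediction a single bit selects between
// the two possible variable references.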
198 0 : static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *const xd,
199 : vpx_writer *w) {
200 0 : const MODE_INFO *const mi = xd->mi[0];
201 0 : const int is_compound = has_second_ref(mi);
202 0 : const int segment_id = mi->segment_id;
203 :
204 : // If segment-level coding of the reference frame is active, the reference
205 : // is implied by the segment; otherwise the choice is coded explicitly.
206 0 : if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
207 0 : assert(!is_compound);
208 0 : assert(mi->ref_frame[0] ==
209 : get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
210 : } else {
211 : // Code whether compound prediction is used, unless the reference mode
212 : // is already fixed at the frame level.
213 0 : if (cm->reference_mode == REFERENCE_MODE_SELECT) {
214 0 : vpx_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
215 : } else {
216 0 : assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
217 : }
218 :
219 0 : if (is_compound) {
220 0 : vpx_write(w, mi->ref_frame[0] == GOLDEN_FRAME,
221 0 : vp9_get_pred_prob_comp_ref_p(cm, xd));
222 : } else {
223 0 : const int bit0 = mi->ref_frame[0] != LAST_FRAME;
224 0 : vpx_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
225 0 : if (bit0) {
226 0 : const int bit1 = mi->ref_frame[0] != GOLDEN_FRAME;
227 0 : vpx_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
228 : }
229 : }
230 : }
231 0 : }
232 :
233 0 : static void pack_inter_mode_mvs(VP9_COMP *cpi, const MACROBLOCKD *const xd,
234 : const MB_MODE_INFO_EXT *const mbmi_ext,
235 : vpx_writer *w,
236 : unsigned int *const max_mv_magnitude,
237 : int interp_filter_selected[MAX_REF_FRAMES]
238 : [SWITCHABLE]) {
239 0 : VP9_COMMON *const cm = &cpi->common;
240 0 : const nmv_context *nmvc = &cm->fc->nmvc;
241 0 : const struct segmentation *const seg = &cm->seg;
242 0 : const MODE_INFO *const mi = xd->mi[0];
243 0 : const PREDICTION_MODE mode = mi->mode;
244 0 : const int segment_id = mi->segment_id;
245 0 : const BLOCK_SIZE bsize = mi->sb_type;
246 0 : const int allow_hp = cm->allow_high_precision_mv;
247 0 : const int is_inter = is_inter_block(mi);
248 0 : const int is_compound = has_second_ref(mi);
249 : int skip, ref;
250 :
251 0 : if (seg->update_map) {
252 0 : if (seg->temporal_update) {
253 0 : const int pred_flag = mi->seg_id_predicted;
254 0 : vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
255 0 : vpx_write(w, pred_flag, pred_prob);
256 0 : if (!pred_flag) write_segment_id(w, seg, segment_id);
257 : } else {
258 0 : write_segment_id(w, seg, segment_id);
259 : }
260 : }
261 :
262 0 : skip = write_skip(cm, xd, segment_id, mi, w);
263 :
264 0 : if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
265 0 : vpx_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));
266 :
267 0 : if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
268 0 : !(is_inter && skip)) {
269 0 : write_selected_tx_size(cm, xd, w);
270 : }
271 :
272 0 : if (!is_inter) {
273 0 : if (bsize >= BLOCK_8X8) {
274 0 : write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
275 : } else {
276 : int idx, idy;
277 0 : const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
278 0 : const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
279 0 : for (idy = 0; idy < 2; idy += num_4x4_h) {
280 0 : for (idx = 0; idx < 2; idx += num_4x4_w) {
281 0 : const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
282 0 : write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
283 : }
284 : }
285 : }
286 0 : write_intra_mode(w, mi->uv_mode, cm->fc->uv_mode_prob[mode]);
287 : } else {
288 0 : const int mode_ctx = mbmi_ext->mode_context[mi->ref_frame[0]];
289 0 : const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
290 0 : write_ref_frames(cm, xd, w);
291 :
292 : // If segment skip is not enabled code the mode.
293 0 : if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
294 0 : if (bsize >= BLOCK_8X8) {
295 0 : write_inter_mode(w, mode, inter_probs);
296 : }
297 : }
298 :
299 0 : if (cm->interp_filter == SWITCHABLE) {
300 0 : const int ctx = get_pred_context_switchable_interp(xd);
301 0 : vp9_write_token(w, vp9_switchable_interp_tree,
302 0 : cm->fc->switchable_interp_prob[ctx],
303 0 : &switchable_interp_encodings[mi->interp_filter]);
304 0 : ++interp_filter_selected[0][mi->interp_filter];
305 : } else {
306 0 : assert(mi->interp_filter == cm->interp_filter);
307 : }
308 :
309 0 : if (bsize < BLOCK_8X8) {
310 0 : const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
311 0 : const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
312 : int idx, idy;
313 0 : for (idy = 0; idy < 2; idy += num_4x4_h) {
314 0 : for (idx = 0; idx < 2; idx += num_4x4_w) {
315 0 : const int j = idy * 2 + idx;
316 0 : const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
317 0 : write_inter_mode(w, b_mode, inter_probs);
318 0 : if (b_mode == NEWMV) {
319 0 : for (ref = 0; ref < 1 + is_compound; ++ref)
320 0 : vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
321 0 : &mbmi_ext->ref_mvs[mi->ref_frame[ref]][0].as_mv,
322 : nmvc, allow_hp, max_mv_magnitude);
323 : }
324 : }
325 : }
326 : } else {
327 0 : if (mode == NEWMV) {
328 0 : for (ref = 0; ref < 1 + is_compound; ++ref)
329 0 : vp9_encode_mv(cpi, w, &mi->mv[ref].as_mv,
330 0 : &mbmi_ext->ref_mvs[mi->ref_frame[ref]][0].as_mv, nmvc,
331 : allow_hp, max_mv_magnitude);
332 : }
333 : }
334 : }
335 0 : }
336 :
337 0 : static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
338 : vpx_writer *w) {
339 0 : const struct segmentation *const seg = &cm->seg;
340 0 : const MODE_INFO *const mi = xd->mi[0];
341 0 : const MODE_INFO *const above_mi = xd->above_mi;
342 0 : const MODE_INFO *const left_mi = xd->left_mi;
343 0 : const BLOCK_SIZE bsize = mi->sb_type;
344 :
345 0 : if (seg->update_map) write_segment_id(w, seg, mi->segment_id);
346 :
347 0 : write_skip(cm, xd, mi->segment_id, mi, w);
348 :
349 0 : if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
350 0 : write_selected_tx_size(cm, xd, w);
351 :
352 0 : if (bsize >= BLOCK_8X8) {
353 0 : write_intra_mode(w, mi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
354 : } else {
355 0 : const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
356 0 : const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
357 : int idx, idy;
358 :
359 0 : for (idy = 0; idy < 2; idy += num_4x4_h) {
360 0 : for (idx = 0; idx < 2; idx += num_4x4_w) {
361 0 : const int block = idy * 2 + idx;
362 0 : write_intra_mode(w, mi->bmi[block].as_mode,
363 : get_y_mode_probs(mi, above_mi, left_mi, block));
364 : }
365 : }
366 : }
367 :
368 0 : write_intra_mode(w, mi->uv_mode, vp9_kf_uv_mode_prob[mi->mode]);
369 0 : }
370 :
371 0 : static void write_modes_b(VP9_COMP *cpi, MACROBLOCKD *const xd,
372 : const TileInfo *const tile, vpx_writer *w,
373 : TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
374 : int mi_row, int mi_col,
375 : unsigned int *const max_mv_magnitude,
376 : int interp_filter_selected[MAX_REF_FRAMES]
377 : [SWITCHABLE]) {
378 0 : const VP9_COMMON *const cm = &cpi->common;
379 0 : const MB_MODE_INFO_EXT *const mbmi_ext =
380 0 : cpi->td.mb.mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
381 : MODE_INFO *m;
382 :
383 0 : xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
384 0 : m = xd->mi[0];
385 :
386 0 : set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[m->sb_type],
387 0 : mi_col, num_8x8_blocks_wide_lookup[m->sb_type], cm->mi_rows,
388 : cm->mi_cols);
389 0 : if (frame_is_intra_only(cm)) {
390 0 : write_mb_modes_kf(cm, xd, w);
391 : } else {
392 0 : pack_inter_mode_mvs(cpi, xd, mbmi_ext, w, max_mv_magnitude,
393 : interp_filter_selected);
394 : }
395 :
396 0 : assert(*tok < tok_end);
397 0 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth);
398 0 : }
399 :
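// Partition symbols use the full 4-way tree only when the block lies
// entirely inside the frame. At the bottom or right edge the legal choices
// shrink, so a single split/non-split bit is coded, or nothing at all when
// only PARTITION_SPLIT remains legal.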
400 0 : static void write_partition(const VP9_COMMON *const cm,
401 : const MACROBLOCKD *const xd, int hbs, int mi_row,
402 : int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
403 : vpx_writer *w) {
404 0 : const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
405 0 : const vpx_prob *const probs = xd->partition_probs[ctx];
406 0 : const int has_rows = (mi_row + hbs) < cm->mi_rows;
407 0 : const int has_cols = (mi_col + hbs) < cm->mi_cols;
408 :
409 0 : if (has_rows && has_cols) {
410 0 : vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
411 0 : } else if (!has_rows && has_cols) {
412 0 : assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
413 0 : vpx_write(w, p == PARTITION_SPLIT, probs[1]);
414 0 : } else if (has_rows && !has_cols) {
415 0 : assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
416 0 : vpx_write(w, p == PARTITION_SPLIT, probs[2]);
417 : } else {
418 0 : assert(p == PARTITION_SPLIT);
419 : }
420 0 : }
421 :
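// Recursively walks the superblock partition tree in the same order the
// decoder parses it: emit the partition symbol, then either the leaf
// block(s) or the four recursively coded quadrants.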
422 0 : static void write_modes_sb(VP9_COMP *cpi, MACROBLOCKD *const xd,
423 : const TileInfo *const tile, vpx_writer *w,
424 : TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
425 : int mi_row, int mi_col, BLOCK_SIZE bsize,
426 : unsigned int *const max_mv_magnitude,
427 : int interp_filter_selected[MAX_REF_FRAMES]
428 : [SWITCHABLE]) {
429 0 : const VP9_COMMON *const cm = &cpi->common;
430 0 : const int bsl = b_width_log2_lookup[bsize];
431 0 : const int bs = (1 << bsl) / 4;
432 : PARTITION_TYPE partition;
433 : BLOCK_SIZE subsize;
434 0 : const MODE_INFO *m = NULL;
435 :
436 0 : if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
437 :
438 0 : m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];
439 :
440 0 : partition = partition_lookup[bsl][m->sb_type];
441 0 : write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
442 0 : subsize = get_subsize(bsize, partition);
443 0 : if (subsize < BLOCK_8X8) {
444 0 : write_modes_b(cpi, xd, tile, w, tok, tok_end, mi_row, mi_col,
445 : max_mv_magnitude, interp_filter_selected);
446 : } else {
447 0 : switch (partition) {
448 : case PARTITION_NONE:
449 0 : write_modes_b(cpi, xd, tile, w, tok, tok_end, mi_row, mi_col,
450 : max_mv_magnitude, interp_filter_selected);
451 0 : break;
452 : case PARTITION_HORZ:
453 0 : write_modes_b(cpi, xd, tile, w, tok, tok_end, mi_row, mi_col,
454 : max_mv_magnitude, interp_filter_selected);
455 0 : if (mi_row + bs < cm->mi_rows)
456 0 : write_modes_b(cpi, xd, tile, w, tok, tok_end, mi_row + bs, mi_col,
457 : max_mv_magnitude, interp_filter_selected);
458 0 : break;
459 : case PARTITION_VERT:
460 0 : write_modes_b(cpi, xd, tile, w, tok, tok_end, mi_row, mi_col,
461 : max_mv_magnitude, interp_filter_selected);
462 0 : if (mi_col + bs < cm->mi_cols)
463 0 : write_modes_b(cpi, xd, tile, w, tok, tok_end, mi_row, mi_col + bs,
464 : max_mv_magnitude, interp_filter_selected);
465 0 : break;
466 : case PARTITION_SPLIT:
467 0 : write_modes_sb(cpi, xd, tile, w, tok, tok_end, mi_row, mi_col, subsize,
468 : max_mv_magnitude, interp_filter_selected);
469 0 : write_modes_sb(cpi, xd, tile, w, tok, tok_end, mi_row, mi_col + bs,
470 : subsize, max_mv_magnitude, interp_filter_selected);
471 0 : write_modes_sb(cpi, xd, tile, w, tok, tok_end, mi_row + bs, mi_col,
472 : subsize, max_mv_magnitude, interp_filter_selected);
473 0 : write_modes_sb(cpi, xd, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
474 : subsize, max_mv_magnitude, interp_filter_selected);
475 0 : break;
476 0 : default: assert(0);
477 : }
478 : }
479 :
480 : // update partition context
481 0 : if (bsize >= BLOCK_8X8 &&
482 0 : (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
483 0 : update_partition_context(xd, mi_row, mi_col, subsize, bsize);
484 : }
485 :
486 0 : static void write_modes(VP9_COMP *cpi, MACROBLOCKD *const xd,
487 : const TileInfo *const tile, vpx_writer *w,
488 : TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
489 : unsigned int *const max_mv_magnitude,
490 : int interp_filter_selected[MAX_REF_FRAMES]
491 : [SWITCHABLE]) {
492 0 : const VP9_COMMON *const cm = &cpi->common;
493 : int mi_row, mi_col;
494 :
495 0 : set_partition_probs(cm, xd);
496 :
497 0 : for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
498 0 : mi_row += MI_BLOCK_SIZE) {
499 0 : vp9_zero(xd->left_seg_context);
500 0 : for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
501 0 : mi_col += MI_BLOCK_SIZE)
502 0 : write_modes_sb(cpi, xd, tile, w, tok, tok_end, mi_row, mi_col,
503 : BLOCK_64X64, max_mv_magnitude, interp_filter_selected);
504 : }
505 0 : }
506 :
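// Builds this frame's coefficient probability estimates: branch counts
// from the accumulated token counts, an end-of-block correction for the
// first node, and binary probabilities for the unconstrained model nodes.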
507 0 : static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
508 : vp9_coeff_stats *coef_branch_ct,
509 : vp9_coeff_probs_model *coef_probs) {
510 0 : vp9_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
511 0 : unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
512 0 : cpi->common.counts.eob_branch[tx_size];
513 : int i, j, k, l, m;
514 :
515 0 : for (i = 0; i < PLANE_TYPES; ++i) {
516 0 : for (j = 0; j < REF_TYPES; ++j) {
517 0 : for (k = 0; k < COEF_BANDS; ++k) {
518 0 : for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
519 0 : vp9_tree_probs_from_distribution(vp9_coef_tree,
520 0 : coef_branch_ct[i][j][k][l],
521 0 : coef_counts[i][j][k][l]);
522 0 : coef_branch_ct[i][j][k][l][0][1] =
523 0 : eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
524 0 : for (m = 0; m < UNCONSTRAINED_NODES; ++m)
525 0 : coef_probs[i][j][k][l][m] =
526 0 : get_binary_prob(coef_branch_ct[i][j][k][l][m][0],
527 0 : coef_branch_ct[i][j][k][l][m][1]);
528 : }
529 : }
530 : }
531 : }
532 0 : }
533 :
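// Two update strategies: TWO_LOOP first does a dry run to measure the net
// rate saving and writes a single 0 bit if updating does not pay off;
// ONE_LOOP_REDUCED decides greedily in one pass, buffering leading
// "no update" bits until the first real update is found.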
534 0 : static void update_coef_probs_common(vpx_writer *const bc, VP9_COMP *cpi,
535 : TX_SIZE tx_size,
536 : vp9_coeff_stats *frame_branch_ct,
537 : vp9_coeff_probs_model *new_coef_probs) {
538 0 : vp9_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
539 0 : const vpx_prob upd = DIFF_UPDATE_PROB;
540 0 : const int entropy_nodes_update = UNCONSTRAINED_NODES;
541 : int i, j, k, l, t;
542 0 : int stepsize = cpi->sf.coeff_prob_appx_step;
543 :
544 0 : switch (cpi->sf.use_fast_coef_updates) {
545 : case TWO_LOOP: {
546 : /* dry run to see if there is any update at all needed */
547 0 : int savings = 0;
548 0 : int update[2] = { 0, 0 };
549 0 : for (i = 0; i < PLANE_TYPES; ++i) {
550 0 : for (j = 0; j < REF_TYPES; ++j) {
551 0 : for (k = 0; k < COEF_BANDS; ++k) {
552 0 : for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
553 0 : for (t = 0; t < entropy_nodes_update; ++t) {
554 0 : vpx_prob newp = new_coef_probs[i][j][k][l][t];
555 0 : const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
556 : int s;
557 0 : int u = 0;
558 0 : if (t == PIVOT_NODE)
559 0 : s = vp9_prob_diff_update_savings_search_model(
560 0 : frame_branch_ct[i][j][k][l][0], oldp, &newp, upd,
561 : stepsize);
562 : else
563 0 : s = vp9_prob_diff_update_savings_search(
564 0 : frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
565 0 : if (s > 0 && newp != oldp) u = 1;
566 0 : if (u)
567 0 : savings += s - (int)(vp9_cost_zero(upd));
568 : else
569 0 : savings -= (int)(vp9_cost_zero(upd));
570 0 : update[u]++;
571 : }
572 : }
573 : }
574 : }
575 : }
576 :
577 : // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
578 : /* Is coef updated at all */
579 0 : if (update[1] == 0 || savings < 0) {
580 0 : vpx_write_bit(bc, 0);
581 0 : return;
582 : }
583 0 : vpx_write_bit(bc, 1);
584 0 : for (i = 0; i < PLANE_TYPES; ++i) {
585 0 : for (j = 0; j < REF_TYPES; ++j) {
586 0 : for (k = 0; k < COEF_BANDS; ++k) {
587 0 : for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
588 : // calc probs and branch cts for this frame only
589 0 : for (t = 0; t < entropy_nodes_update; ++t) {
590 0 : vpx_prob newp = new_coef_probs[i][j][k][l][t];
591 0 : vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
592 0 : const vpx_prob upd = DIFF_UPDATE_PROB;
593 : int s;
594 0 : int u = 0;
595 0 : if (t == PIVOT_NODE)
596 0 : s = vp9_prob_diff_update_savings_search_model(
597 0 : frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd,
598 : stepsize);
599 : else
600 0 : s = vp9_prob_diff_update_savings_search(
601 0 : frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
602 0 : if (s > 0 && newp != *oldp) u = 1;
603 0 : vpx_write(bc, u, upd);
604 0 : if (u) {
605 : /* send/use new probability */
606 0 : vp9_write_prob_diff_update(bc, newp, *oldp);
607 0 : *oldp = newp;
608 : }
609 : }
610 : }
611 : }
612 : }
613 : }
614 0 : return;
615 : }
616 :
617 : case ONE_LOOP_REDUCED: {
618 0 : int updates = 0;
619 0 : int noupdates_before_first = 0;
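      // Leading "no update" bits are counted in noupdates_before_first and
      // only flushed once a real update occurs, so a frame without any
      // update costs exactly one bit.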
620 0 : for (i = 0; i < PLANE_TYPES; ++i) {
621 0 : for (j = 0; j < REF_TYPES; ++j) {
622 0 : for (k = 0; k < COEF_BANDS; ++k) {
623 0 : for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
624 : // calc probs and branch cts for this frame only
625 0 : for (t = 0; t < entropy_nodes_update; ++t) {
626 0 : vpx_prob newp = new_coef_probs[i][j][k][l][t];
627 0 : vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
628 : int s;
629 0 : int u = 0;
630 :
631 0 : if (t == PIVOT_NODE) {
632 0 : s = vp9_prob_diff_update_savings_search_model(
633 0 : frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd,
634 : stepsize);
635 : } else {
636 0 : s = vp9_prob_diff_update_savings_search(
637 0 : frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
638 : }
639 :
640 0 : if (s > 0 && newp != *oldp) u = 1;
641 0 : updates += u;
642 0 : if (u == 0 && updates == 0) {
643 0 : noupdates_before_first++;
644 0 : continue;
645 : }
646 0 : if (u == 1 && updates == 1) {
647 : int v;
648 : // first update
649 0 : vpx_write_bit(bc, 1);
650 0 : for (v = 0; v < noupdates_before_first; ++v)
651 0 : vpx_write(bc, 0, upd);
652 : }
653 0 : vpx_write(bc, u, upd);
654 0 : if (u) {
655 : /* send/use new probability */
656 0 : vp9_write_prob_diff_update(bc, newp, *oldp);
657 0 : *oldp = newp;
658 : }
659 : }
660 : }
661 : }
662 : }
663 : }
664 0 : if (updates == 0) {
665 0 : vpx_write_bit(bc, 0); // no updates
666 : }
667 0 : return;
668 : }
669 0 : default: assert(0);
670 : }
671 : }
672 :
673 0 : static void update_coef_probs(VP9_COMP *cpi, vpx_writer *w) {
674 0 : const TX_MODE tx_mode = cpi->common.tx_mode;
675 0 : const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
676 : TX_SIZE tx_size;
677 0 : for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
678 : vp9_coeff_stats frame_branch_ct[PLANE_TYPES];
679 : vp9_coeff_probs_model frame_coef_probs[PLANE_TYPES];
680 0 : if (cpi->td.counts->tx.tx_totals[tx_size] <= 20 ||
681 0 : (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
682 0 : vpx_write_bit(w, 0);
683 : } else {
684 0 : build_tree_distribution(cpi, tx_size, frame_branch_ct, frame_coef_probs);
685 0 : update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
686 : frame_coef_probs);
687 : }
688 : }
689 0 : }
690 :
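// Loop filter: 6-bit level and 3-bit sharpness, then optional per-reference
// and per-mode filter level deltas, each coded as a changed flag plus 6-bit
// magnitude and a sign bit when it differs from the previously sent value.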
691 0 : static void encode_loopfilter(struct loopfilter *lf,
692 : struct vpx_write_bit_buffer *wb) {
693 : int i;
694 :
695 : // Encode the loop filter level and type
696 0 : vpx_wb_write_literal(wb, lf->filter_level, 6);
697 0 : vpx_wb_write_literal(wb, lf->sharpness_level, 3);
698 :
699 : // Write out loop filter deltas applied at the MB level based on mode or
700 : // ref frame (if they are enabled).
701 0 : vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);
702 :
703 0 : if (lf->mode_ref_delta_enabled) {
704 0 : vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
705 0 : if (lf->mode_ref_delta_update) {
706 0 : for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
707 0 : const int delta = lf->ref_deltas[i];
708 0 : const int changed = delta != lf->last_ref_deltas[i];
709 0 : vpx_wb_write_bit(wb, changed);
710 0 : if (changed) {
711 0 : lf->last_ref_deltas[i] = delta;
712 0 : vpx_wb_write_literal(wb, abs(delta) & 0x3F, 6);
713 0 : vpx_wb_write_bit(wb, delta < 0);
714 : }
715 : }
716 :
717 0 : for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
718 0 : const int delta = lf->mode_deltas[i];
719 0 : const int changed = delta != lf->last_mode_deltas[i];
720 0 : vpx_wb_write_bit(wb, changed);
721 0 : if (changed) {
722 0 : lf->last_mode_deltas[i] = delta;
723 0 : vpx_wb_write_literal(wb, abs(delta) & 0x3F, 6);
724 0 : vpx_wb_write_bit(wb, delta < 0);
725 : }
726 : }
727 : }
728 : }
729 0 : }
730 :
731 0 : static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
732 0 : if (delta_q != 0) {
733 0 : vpx_wb_write_bit(wb, 1);
734 0 : vpx_wb_write_literal(wb, abs(delta_q), 4);
735 0 : vpx_wb_write_bit(wb, delta_q < 0);
736 : } else {
737 0 : vpx_wb_write_bit(wb, 0);
738 : }
739 0 : }
740 :
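// Quantizer: the base q index as a literal, then three optional signed
// deltas (y dc, uv dc, uv ac), each a presence bit plus 4-bit magnitude
// and sign.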
741 0 : static void encode_quantization(const VP9_COMMON *const cm,
742 : struct vpx_write_bit_buffer *wb) {
743 0 : vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
744 0 : write_delta_q(wb, cm->y_dc_delta_q);
745 0 : write_delta_q(wb, cm->uv_dc_delta_q);
746 0 : write_delta_q(wb, cm->uv_ac_delta_q);
747 0 : }
748 :
749 0 : static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd,
750 : struct vpx_write_bit_buffer *wb) {
751 : int i, j;
752 :
753 0 : const struct segmentation *seg = &cm->seg;
754 :
755 0 : vpx_wb_write_bit(wb, seg->enabled);
756 0 : if (!seg->enabled) return;
757 :
758 : // Segmentation map
759 0 : vpx_wb_write_bit(wb, seg->update_map);
760 0 : if (seg->update_map) {
761 : // Select the coding strategy (temporal or spatial)
762 0 : vp9_choose_segmap_coding_method(cm, xd);
763 : // Write out probabilities used to decode unpredicted macro-block segments
764 0 : for (i = 0; i < SEG_TREE_PROBS; i++) {
765 0 : const int prob = seg->tree_probs[i];
766 0 : const int update = prob != MAX_PROB;
767 0 : vpx_wb_write_bit(wb, update);
768 0 : if (update) vpx_wb_write_literal(wb, prob, 8);
769 : }
770 :
771 : // Write out the chosen coding method.
772 0 : vpx_wb_write_bit(wb, seg->temporal_update);
773 0 : if (seg->temporal_update) {
774 0 : for (i = 0; i < PREDICTION_PROBS; i++) {
775 0 : const int prob = seg->pred_probs[i];
776 0 : const int update = prob != MAX_PROB;
777 0 : vpx_wb_write_bit(wb, update);
778 0 : if (update) vpx_wb_write_literal(wb, prob, 8);
779 : }
780 : }
781 : }
782 :
783 : // Segmentation data
784 0 : vpx_wb_write_bit(wb, seg->update_data);
785 0 : if (seg->update_data) {
786 0 : vpx_wb_write_bit(wb, seg->abs_delta);
787 :
788 0 : for (i = 0; i < MAX_SEGMENTS; i++) {
789 0 : for (j = 0; j < SEG_LVL_MAX; j++) {
790 0 : const int active = segfeature_active(seg, i, j);
791 0 : vpx_wb_write_bit(wb, active);
792 0 : if (active) {
793 0 : const int data = get_segdata(seg, i, j);
794 0 : const int data_max = vp9_seg_feature_data_max(j);
795 :
796 0 : if (vp9_is_segfeature_signed(j)) {
797 0 : encode_unsigned_max(wb, abs(data), data_max);
798 0 : vpx_wb_write_bit(wb, data < 0);
799 : } else {
800 0 : encode_unsigned_max(wb, data, data_max);
801 : }
802 : }
803 : }
804 : }
805 : }
806 : }
807 :
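// Transform mode: a 2-bit literal capped at ALLOW_32X32 plus one bit to
// distinguish ALLOW_32X32 from TX_MODE_SELECT; in select mode the
// per-context tx_size split probabilities are then conditionally updated.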
808 0 : static void encode_txfm_probs(VP9_COMMON *cm, vpx_writer *w,
809 : FRAME_COUNTS *counts) {
810 : // Mode
811 0 : vpx_write_literal(w, VPXMIN(cm->tx_mode, ALLOW_32X32), 2);
812 0 : if (cm->tx_mode >= ALLOW_32X32)
813 0 : vpx_write_bit(w, cm->tx_mode == TX_MODE_SELECT);
814 :
815 : // Probabilities
816 0 : if (cm->tx_mode == TX_MODE_SELECT) {
817 : int i, j;
818 : unsigned int ct_8x8p[TX_SIZES - 3][2];
819 : unsigned int ct_16x16p[TX_SIZES - 2][2];
820 : unsigned int ct_32x32p[TX_SIZES - 1][2];
821 :
822 0 : for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
823 0 : tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
824 0 : for (j = 0; j < TX_SIZES - 3; j++)
825 0 : vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
826 : }
827 :
828 0 : for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
829 0 : tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
830 0 : for (j = 0; j < TX_SIZES - 2; j++)
831 0 : vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
832 0 : ct_16x16p[j]);
833 : }
834 :
835 0 : for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
836 0 : tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
837 0 : for (j = 0; j < TX_SIZES - 1; j++)
838 0 : vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
839 0 : ct_32x32p[j]);
840 : }
841 : }
842 0 : }
843 :
844 0 : static void write_interp_filter(INTERP_FILTER filter,
845 : struct vpx_write_bit_buffer *wb) {
846 0 : const int filter_to_literal[] = { 1, 0, 2, 3 };
847 :
848 0 : vpx_wb_write_bit(wb, filter == SWITCHABLE);
849 0 : if (filter != SWITCHABLE)
850 0 : vpx_wb_write_literal(wb, filter_to_literal[filter], 2);
851 0 : }
852 :
853 0 : static void fix_interp_filter(VP9_COMMON *cm, FRAME_COUNTS *counts) {
854 0 : if (cm->interp_filter == SWITCHABLE) {
855 : // Check to see if only one of the filters is actually used
856 : int count[SWITCHABLE_FILTERS];
857 0 : int i, j, c = 0;
858 0 : for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
859 0 : count[i] = 0;
860 0 : for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
861 0 : count[i] += counts->switchable_interp[j][i];
862 0 : c += (count[i] > 0);
863 : }
864 0 : if (c == 1) {
865 : // Only one filter is used, so signal that filter at the frame level.
866 0 : for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
867 0 : if (count[i]) {
868 0 : cm->interp_filter = i;
869 0 : break;
870 : }
871 : }
872 : }
873 : }
874 0 : }
875 :
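// log2_tile_cols is coded in unary above the minimum legal value: one 1 bit
// per increment, terminated by a 0 bit unless the maximum was reached.
// log2_tile_rows (0..2) takes one or two bits.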
876 0 : static void write_tile_info(const VP9_COMMON *const cm,
877 : struct vpx_write_bit_buffer *wb) {
878 : int min_log2_tile_cols, max_log2_tile_cols, ones;
879 0 : vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
880 :
881 : // columns
882 0 : ones = cm->log2_tile_cols - min_log2_tile_cols;
883 0 : while (ones--) vpx_wb_write_bit(wb, 1);
884 :
885 0 : if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0);
886 :
887 : // rows
888 0 : vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
889 0 : if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
890 0 : }
891 :
892 0 : int vp9_get_refresh_mask(VP9_COMP *cpi) {
893 0 : if (vp9_preserve_existing_gf(cpi)) {
894 : // We have decided to preserve the previously existing golden frame as our
895 : // new ARF frame. However, in the short term we leave it in the GF slot and,
896 : // if we're updating the GF with the current decoded frame, we save it
897 : // instead to the ARF slot.
898 : // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we
899 : // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
900 : // there so that it can be done outside of the recode loop.
901 : // Note: This is highly specific to the use of ARF as a forward reference,
902 : // and this needs to be generalized as other uses are implemented
903 : // (like RTC/temporal scalability).
904 0 : return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
905 0 : (cpi->refresh_golden_frame << cpi->alt_fb_idx);
906 : } else {
907 0 : int arf_idx = cpi->alt_fb_idx;
908 0 : if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
909 0 : const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
910 0 : arf_idx = gf_group->arf_update_idx[gf_group->index];
911 : }
912 0 : return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
913 0 : (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
914 0 : (cpi->refresh_alt_ref_frame << arf_idx);
915 : }
916 : }
917 :
918 0 : static int encode_tile_worker(VP9_COMP *cpi, VP9BitstreamWorkerData *data) {
919 0 : MACROBLOCKD *const xd = &data->xd;
920 0 : vpx_start_encode(&data->bit_writer, data->dest);
921 0 : write_modes(cpi, xd, &cpi->tile_data[data->tile_idx].tile_info,
922 0 : &data->bit_writer, &data->tok, data->tok_end,
923 0 : &data->max_mv_magnitude, data->interp_filter_selected);
924 0 : assert(data->tok == data->tok_end);
925 0 : vpx_stop_encode(&data->bit_writer);
926 0 : return 1;
927 : }
928 :
929 0 : void vp9_bitstream_encode_tiles_buffer_dealloc(VP9_COMP *const cpi) {
930 0 : if (cpi->vp9_bitstream_worker_data) {
931 : int i;
932 0 : for (i = 1; i < cpi->num_workers; ++i) {
933 0 : vpx_free(cpi->vp9_bitstream_worker_data[i].dest);
934 : }
935 0 : vpx_free(cpi->vp9_bitstream_worker_data);
936 0 : cpi->vp9_bitstream_worker_data = NULL;
937 : }
938 0 : }
939 :
940 0 : static int encode_tiles_buffer_alloc(VP9_COMP *const cpi) {
941 : int i;
942 0 : const size_t worker_data_size =
943 0 : cpi->num_workers * sizeof(*cpi->vp9_bitstream_worker_data);
944 0 : cpi->vp9_bitstream_worker_data = vpx_memalign(16, worker_data_size);
945 0 : if (!cpi->vp9_bitstream_worker_data) return 1;
946 0 : memset(cpi->vp9_bitstream_worker_data, 0, worker_data_size);
947 0 : for (i = 1; i < cpi->num_workers; ++i) {
948 0 : cpi->vp9_bitstream_worker_data[i].dest_size =
949 0 : cpi->oxcf.width * cpi->oxcf.height;
950 0 : cpi->vp9_bitstream_worker_data[i].dest =
951 0 : vpx_malloc(cpi->vp9_bitstream_worker_data[i].dest_size);
952 0 : if (!cpi->vp9_bitstream_worker_data[i].dest) return 1;
953 : }
954 0 : return 0;
955 : }
956 :
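// Multithreaded tile packing: tiles are handed out in batches of
// num_workers. All but the last worker of a batch are launched
// asynchronously and the last runs on the calling thread; worker 0 writes
// straight into the output buffer while the others are copied in after
// sync. Every tile except the frame's last is prefixed with its 32-bit
// big-endian size.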
957 0 : static size_t encode_tiles_mt(VP9_COMP *cpi, uint8_t *data_ptr) {
958 0 : const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
959 0 : VP9_COMMON *const cm = &cpi->common;
960 0 : const int tile_cols = 1 << cm->log2_tile_cols;
961 0 : const int num_workers = cpi->num_workers;
962 0 : size_t total_size = 0;
963 0 : int tile_col = 0;
964 :
965 0 : if (!cpi->vp9_bitstream_worker_data ||
966 0 : cpi->vp9_bitstream_worker_data[1].dest_size >
967 0 : (cpi->oxcf.width * cpi->oxcf.height)) {
968 0 : vp9_bitstream_encode_tiles_buffer_dealloc(cpi);
969 0 : if (encode_tiles_buffer_alloc(cpi)) return 0;
970 : }
971 :
972 0 : while (tile_col < tile_cols) {
973 : int i, j;
974 0 : for (i = 0; i < num_workers && tile_col < tile_cols; ++i) {
975 0 : VPxWorker *const worker = &cpi->workers[i];
976 0 : VP9BitstreamWorkerData *const data = &cpi->vp9_bitstream_worker_data[i];
977 :
978 : // Populate the worker data.
979 0 : data->xd = cpi->td.mb.e_mbd;
980 0 : data->tile_idx = tile_col;
981 0 : data->tok = cpi->tile_tok[0][tile_col];
982 0 : data->tok_end = cpi->tile_tok[0][tile_col] + cpi->tok_count[0][tile_col];
983 0 : data->max_mv_magnitude = cpi->max_mv_magnitude;
984 0 : memset(data->interp_filter_selected, 0,
985 : sizeof(data->interp_filter_selected[0][0]) * SWITCHABLE);
986 :
987 : // First thread can directly write into the output buffer.
988 0 : if (i == 0) {
989 : // If this worker happens to be for the last tile, then do not offset it
990 : // by 4 for the tile size.
991 0 : data->dest =
992 0 : data_ptr + total_size + (tile_col == tile_cols - 1 ? 0 : 4);
993 : }
994 0 : worker->data1 = cpi;
995 0 : worker->data2 = data;
996 0 : worker->hook = (VPxWorkerHook)encode_tile_worker;
997 0 : worker->had_error = 0;
998 :
999 0 : if (i < num_workers - 1) {
1000 0 : winterface->launch(worker);
1001 : } else {
1002 0 : winterface->execute(worker);
1003 : }
1004 0 : ++tile_col;
1005 : }
1006 0 : for (j = 0; j < i; ++j) {
1007 0 : VPxWorker *const worker = &cpi->workers[j];
1008 0 : VP9BitstreamWorkerData *const data =
1009 : (VP9BitstreamWorkerData *)worker->data2;
1010 : uint32_t tile_size;
1011 : int k;
1012 :
1013 0 : if (!winterface->sync(worker)) return 0;
1014 0 : tile_size = data->bit_writer.pos;
1015 :
1016 : // Aggregate per-thread bitstream stats.
1017 0 : cpi->max_mv_magnitude =
1018 0 : VPXMAX(cpi->max_mv_magnitude, data->max_mv_magnitude);
1019 0 : for (k = 0; k < SWITCHABLE; ++k) {
1020 0 : cpi->interp_filter_selected[0][k] += data->interp_filter_selected[0][k];
1021 : }
1022 :
1023 : // Prefix each tile with its 32-bit size, except the frame's last tile.
1024 0 : if (tile_col != tile_cols || j < i - 1) {
1025 0 : mem_put_be32(data_ptr + total_size, tile_size);
1026 0 : total_size += 4;
1027 : }
1028 0 : if (j > 0) {
1029 0 : memcpy(data_ptr + total_size, data->dest, tile_size);
1030 : }
1031 0 : total_size += tile_size;
1032 : }
1033 : }
1034 0 : return total_size;
1035 : }
1036 :
1037 0 : static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
1038 0 : VP9_COMMON *const cm = &cpi->common;
1039 0 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
1040 : vpx_writer residual_bc;
1041 : int tile_row, tile_col;
1042 : TOKENEXTRA *tok_end;
1043 0 : size_t total_size = 0;
1044 0 : const int tile_cols = 1 << cm->log2_tile_cols;
1045 0 : const int tile_rows = 1 << cm->log2_tile_rows;
1046 :
1047 0 : memset(cm->above_seg_context, 0,
1048 0 : sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols));
1049 :
1050 : // Tiles are encoded in parallel only in realtime mode for now. In other
1051 : // modes the speed-up is insignificant, and enabling it would require
1052 : // further testing to ensure it does not make the overall process worse.
1053 0 : if (cpi->oxcf.mode == REALTIME && cpi->num_workers > 1 && tile_rows == 1 &&
1054 : tile_cols > 1) {
1055 0 : return encode_tiles_mt(cpi, data_ptr);
1056 : }
1057 :
1058 0 : for (tile_row = 0; tile_row < tile_rows; tile_row++) {
1059 0 : for (tile_col = 0; tile_col < tile_cols; tile_col++) {
1060 0 : int tile_idx = tile_row * tile_cols + tile_col;
1061 0 : TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
1062 :
1063 0 : tok_end = cpi->tile_tok[tile_row][tile_col] +
1064 0 : cpi->tok_count[tile_row][tile_col];
1065 :
1066 0 : if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
1067 0 : vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
1068 : else
1069 0 : vpx_start_encode(&residual_bc, data_ptr + total_size);
1070 :
1071 0 : write_modes(cpi, xd, &cpi->tile_data[tile_idx].tile_info, &residual_bc,
1072 0 : &tok, tok_end, &cpi->max_mv_magnitude,
1073 0 : cpi->interp_filter_selected);
1074 0 : assert(tok == tok_end);
1075 0 : vpx_stop_encode(&residual_bc);
1076 0 : if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
1077 : // size of this tile
1078 0 : mem_put_be32(data_ptr + total_size, residual_bc.pos);
1079 0 : total_size += 4;
1080 : }
1081 :
1082 0 : total_size += residual_bc.pos;
1083 : }
1084 : }
1085 0 : return total_size;
1086 : }
1087 :
1088 0 : static void write_render_size(const VP9_COMMON *cm,
1089 : struct vpx_write_bit_buffer *wb) {
1090 0 : const int scaling_active =
1091 0 : cm->width != cm->render_width || cm->height != cm->render_height;
1092 0 : vpx_wb_write_bit(wb, scaling_active);
1093 0 : if (scaling_active) {
1094 0 : vpx_wb_write_literal(wb, cm->render_width - 1, 16);
1095 0 : vpx_wb_write_literal(wb, cm->render_height - 1, 16);
1096 : }
1097 0 : }
1098 :
1099 0 : static void write_frame_size(const VP9_COMMON *cm,
1100 : struct vpx_write_bit_buffer *wb) {
1101 0 : vpx_wb_write_literal(wb, cm->width - 1, 16);
1102 0 : vpx_wb_write_literal(wb, cm->height - 1, 16);
1103 :
1104 0 : write_render_size(cm, wb);
1105 0 : }
1106 :
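// One "found" bit is written per active reference until a buffer matching
// the current frame dimensions is hit; if none matches, the width and
// height are coded explicitly. The render size always follows.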
1107 0 : static void write_frame_size_with_refs(VP9_COMP *cpi,
1108 : struct vpx_write_bit_buffer *wb) {
1109 0 : VP9_COMMON *const cm = &cpi->common;
1110 0 : int found = 0;
1111 :
1112 : MV_REFERENCE_FRAME ref_frame;
1113 0 : for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
1114 0 : YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
1115 :
1116 : // Set "found" to 0 for temporal svc and for spatial svc key frame
1117 0 : if (cpi->use_svc &&
1118 0 : ((cpi->svc.number_temporal_layers > 1 &&
1119 0 : cpi->oxcf.rc_mode == VPX_CBR) ||
1120 0 : (cpi->svc.number_spatial_layers > 1 &&
1121 0 : cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) ||
1122 0 : (is_two_pass_svc(cpi) &&
1123 0 : cpi->svc.encode_empty_frame_state == ENCODING &&
1124 0 : cpi->svc.layer_context[0].frames_from_key_frame <
1125 0 : cpi->svc.number_temporal_layers + 1))) {
1126 0 : found = 0;
1127 0 : } else if (cfg != NULL) {
1128 0 : found =
1129 0 : cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height;
1130 : }
1131 0 : vpx_wb_write_bit(wb, found);
1132 0 : if (found) {
1133 0 : break;
1134 : }
1135 : }
1136 :
1137 0 : if (!found) {
1138 0 : vpx_wb_write_literal(wb, cm->width - 1, 16);
1139 0 : vpx_wb_write_literal(wb, cm->height - 1, 16);
1140 : }
1141 :
1142 0 : write_render_size(cm, wb);
1143 0 : }
1144 :
1145 0 : static void write_sync_code(struct vpx_write_bit_buffer *wb) {
1146 0 : vpx_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
1147 0 : vpx_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
1148 0 : vpx_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
1149 0 : }
1150 :
1151 0 : static void write_profile(BITSTREAM_PROFILE profile,
1152 : struct vpx_write_bit_buffer *wb) {
1153 0 : switch (profile) {
1154 0 : case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
1155 0 : case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
1156 0 : case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
1157 0 : case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
1158 0 : default: assert(0);
1159 : }
1160 0 : }
1161 :
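// Profiles 2 and 3 code one bit to distinguish 10-bit from 12-bit depth.
// The color space takes 3 bits; for non-sRGB spaces a color range bit
// follows, and profiles 1 and 3 additionally code the chroma subsampling.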
1162 0 : static void write_bitdepth_colorspace_sampling(
1163 : VP9_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
1164 0 : if (cm->profile >= PROFILE_2) {
1165 0 : assert(cm->bit_depth > VPX_BITS_8);
1166 0 : vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
1167 : }
1168 0 : vpx_wb_write_literal(wb, cm->color_space, 3);
1169 0 : if (cm->color_space != VPX_CS_SRGB) {
1170 : // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
1171 0 : vpx_wb_write_bit(wb, cm->color_range);
1172 0 : if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
1173 0 : assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
1174 0 : vpx_wb_write_bit(wb, cm->subsampling_x);
1175 0 : vpx_wb_write_bit(wb, cm->subsampling_y);
1176 0 : vpx_wb_write_bit(wb, 0); // unused
1177 : } else {
1178 0 : assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
1179 : }
1180 : } else {
1181 0 : assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
1182 0 : vpx_wb_write_bit(wb, 0); // unused
1183 : }
1184 0 : }
1185 :
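// Writes the uncompressed frame header: frame marker, profile, frame type
// and show/error-resilience flags, then, depending on frame type, the sync
// code, bit depth/color/subsampling info, reference buffer setup, frame and
// render size, interpolation filter, loop filter, quantizer, segmentation
// and tile layout.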
1186 0 : static void write_uncompressed_header(VP9_COMP *cpi,
1187 : struct vpx_write_bit_buffer *wb) {
1188 0 : VP9_COMMON *const cm = &cpi->common;
1189 0 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
1190 :
1191 0 : vpx_wb_write_literal(wb, VP9_FRAME_MARKER, 2);
1192 :
1193 0 : write_profile(cm->profile, wb);
1194 :
1195 0 : vpx_wb_write_bit(wb, 0); // show_existing_frame
1196 0 : vpx_wb_write_bit(wb, cm->frame_type);
1197 0 : vpx_wb_write_bit(wb, cm->show_frame);
1198 0 : vpx_wb_write_bit(wb, cm->error_resilient_mode);
1199 :
1200 0 : if (cm->frame_type == KEY_FRAME) {
1201 0 : write_sync_code(wb);
1202 0 : write_bitdepth_colorspace_sampling(cm, wb);
1203 0 : write_frame_size(cm, wb);
1204 : } else {
1205 : // In spatial SVC, when not in error_resilient_mode, all visible frames
1206 : // must be coded as invisible, but the show_frame flag is kept so that
1207 : // the publisher can tell whether a frame is supposed to be visible.
1208 : // So we code the show_frame flag as-is and then code the intra_only bit
1209 : // here, which makes the bitstream incompatible. In the player we change
1210 : // the show_frame flag to 0 and add a one-byte frame with the
1211 : // show_existing_frame flag set, which tells the decoder which frame we
1212 : // want to show.
1213 0 : if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only);
1214 :
1215 0 : if (!cm->error_resilient_mode)
1216 0 : vpx_wb_write_literal(wb, cm->reset_frame_context, 2);
1217 :
1218 0 : if (cm->intra_only) {
1219 0 : write_sync_code(wb);
1220 :
1221 : // Note for profile 0, 420 8bpp is assumed.
1222 0 : if (cm->profile > PROFILE_0) {
1223 0 : write_bitdepth_colorspace_sampling(cm, wb);
1224 : }
1225 :
1226 0 : vpx_wb_write_literal(wb, vp9_get_refresh_mask(cpi), REF_FRAMES);
1227 0 : write_frame_size(cm, wb);
1228 : } else {
1229 : MV_REFERENCE_FRAME ref_frame;
1230 0 : vpx_wb_write_literal(wb, vp9_get_refresh_mask(cpi), REF_FRAMES);
1231 0 : for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
1232 0 : assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
1233 0 : vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
1234 : REF_FRAMES_LOG2);
1235 0 : vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
1236 : }
1237 :
1238 0 : write_frame_size_with_refs(cpi, wb);
1239 :
1240 0 : vpx_wb_write_bit(wb, cm->allow_high_precision_mv);
1241 :
1242 0 : fix_interp_filter(cm, cpi->td.counts);
1243 0 : write_interp_filter(cm->interp_filter, wb);
1244 : }
1245 : }
1246 :
1247 0 : if (!cm->error_resilient_mode) {
1248 0 : vpx_wb_write_bit(wb, cm->refresh_frame_context);
1249 0 : vpx_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
1250 : }
1251 :
1252 0 : vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
1253 :
1254 0 : encode_loopfilter(&cm->lf, wb);
1255 0 : encode_quantization(cm, wb);
1256 0 : encode_segmentation(cm, xd, wb);
1257 :
1258 0 : write_tile_info(cm, wb);
1259 0 : }
1260 :
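// Writes the arithmetic-coded header: transform mode and probabilities,
// coefficient probability updates, skip probabilities and, for inter
// frames, the mode, filter, reference, partition and motion vector
// probability updates.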
1261 0 : static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
1262 0 : VP9_COMMON *const cm = &cpi->common;
1263 0 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
1264 0 : FRAME_CONTEXT *const fc = cm->fc;
1265 0 : FRAME_COUNTS *counts = cpi->td.counts;
1266 : vpx_writer header_bc;
1267 :
1268 0 : vpx_start_encode(&header_bc, data);
1269 :
1270 0 : if (xd->lossless)
1271 0 : cm->tx_mode = ONLY_4X4;
1272 : else
1273 0 : encode_txfm_probs(cm, &header_bc, counts);
1274 :
1275 0 : update_coef_probs(cpi, &header_bc);
1276 0 : update_skip_probs(cm, &header_bc, counts);
1277 :
1278 0 : if (!frame_is_intra_only(cm)) {
1279 : int i;
1280 :
1281 0 : for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
1282 0 : prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
1283 0 : counts->inter_mode[i], INTER_MODES, &header_bc);
1284 :
1285 0 : if (cm->interp_filter == SWITCHABLE)
1286 0 : update_switchable_interp_probs(cm, &header_bc, counts);
1287 :
1288 0 : for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
1289 0 : vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
1290 0 : counts->intra_inter[i]);
1291 :
1292 0 : if (cpi->allow_comp_inter_inter) {
1293 0 : const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
1294 0 : const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
1295 :
1296 0 : vpx_write_bit(&header_bc, use_compound_pred);
1297 0 : if (use_compound_pred) {
1298 0 : vpx_write_bit(&header_bc, use_hybrid_pred);
1299 0 : if (use_hybrid_pred)
1300 0 : for (i = 0; i < COMP_INTER_CONTEXTS; i++)
1301 0 : vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
1302 0 : counts->comp_inter[i]);
1303 : }
1304 : }
1305 :
1306 0 : if (cm->reference_mode != COMPOUND_REFERENCE) {
1307 0 : for (i = 0; i < REF_CONTEXTS; i++) {
1308 0 : vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
1309 0 : counts->single_ref[i][0]);
1310 0 : vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
1311 0 : counts->single_ref[i][1]);
1312 : }
1313 : }
1314 :
1315 0 : if (cm->reference_mode != SINGLE_REFERENCE)
1316 0 : for (i = 0; i < REF_CONTEXTS; i++)
1317 0 : vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
1318 0 : counts->comp_ref[i]);
1319 :
1320 0 : for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
1321 0 : prob_diff_update(vp9_intra_mode_tree, cm->fc->y_mode_prob[i],
1322 0 : counts->y_mode[i], INTRA_MODES, &header_bc);
1323 :
1324 0 : for (i = 0; i < PARTITION_CONTEXTS; ++i)
1325 0 : prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
1326 0 : counts->partition[i], PARTITION_TYPES, &header_bc);
1327 :
1328 0 : vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
1329 : &counts->mv);
1330 : }
1331 :
1332 0 : vpx_stop_encode(&header_bc);
1333 0 : assert(header_bc.pos <= 0xffff);
1334 :
1335 0 : return header_bc.pos;
1336 : }
1337 :
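// Top-level packing: the uncompressed header, a 16-bit placeholder that is
// later backfilled with the compressed header size, the compressed header
// itself, and finally the tile data.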
1338 0 : void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
1339 0 : uint8_t *data = dest;
1340 : size_t first_part_size, uncompressed_hdr_size;
1341 0 : struct vpx_write_bit_buffer wb = { data, 0 };
1342 : struct vpx_write_bit_buffer saved_wb;
1343 :
1344 0 : write_uncompressed_header(cpi, &wb);
1345 0 : saved_wb = wb;
1346 0 : vpx_wb_write_literal(&wb, 0, 16); // first part size not known yet; backfilled below
1347 :
1348 0 : uncompressed_hdr_size = vpx_wb_bytes_written(&wb);
1349 0 : data += uncompressed_hdr_size;
1350 :
1351 0 : vpx_clear_system_state();
1352 :
1353 0 : first_part_size = write_compressed_header(cpi, data);
1354 0 : data += first_part_size;
1355 : // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
1356 0 : vpx_wb_write_literal(&saved_wb, (int)first_part_size, 16);
1357 :
1358 0 : data += encode_tiles(cpi, data);
1359 :
1360 0 : *size = data - dest;
1361 0 : }