1 : /*
2 : * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3 : *
4 : * This source code is subject to the terms of the BSD 2 Clause License and
5 : * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 : * was not distributed with this source code in the LICENSE file, you can
7 : * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 : * Media Patent License 1.0 was not distributed with this source code in the
9 : * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 : */
11 :
12 : #include <assert.h>
13 : #include <limits.h>
14 : #include <stdio.h>
15 :
16 : #include "aom/aom_encoder.h"
17 : #include "aom_dsp/bitwriter_buffer.h"
18 : #include "aom_dsp/aom_dsp_common.h"
19 : #include "aom_dsp/binary_codes_writer.h"
20 : #include "aom_mem/aom_mem.h"
21 : #include "aom_ports/mem_ops.h"
22 : #include "aom_ports/system_state.h"
23 : #if CONFIG_BITSTREAM_DEBUG
24 : #include "aom_util/debug_util.h"
25 : #endif // CONFIG_BITSTREAM_DEBUG
26 :
27 : #if CONFIG_CDEF
28 : #include "av1/common/cdef.h"
29 : #include "av1/common/clpf.h"
30 : #endif // CONFIG_CDEF
31 : #include "av1/common/entropy.h"
32 : #include "av1/common/entropymode.h"
33 : #include "av1/common/entropymv.h"
34 : #include "av1/common/mvref_common.h"
35 : #include "av1/common/odintrin.h"
36 : #include "av1/common/pred_common.h"
37 : #include "av1/common/reconinter.h"
38 : #if CONFIG_EXT_INTRA
39 : #include "av1/common/reconintra.h"
40 : #endif // CONFIG_EXT_INTRA
41 : #include "av1/common/seg_common.h"
42 : #include "av1/common/tile_common.h"
43 :
44 : #if CONFIG_ANS
45 : #include "aom_dsp/buf_ans.h"
46 : #endif // CONFIG_ANS
47 : #if CONFIG_LV_MAP
48 : #include "av1/encoder/encodetxb.h"
49 : #endif // CONFIG_LV_MAP
50 : #include "av1/encoder/bitstream.h"
51 : #include "av1/encoder/cost.h"
52 : #include "av1/encoder/encodemv.h"
53 : #include "av1/encoder/mcomp.h"
54 : #if CONFIG_PALETTE && CONFIG_PALETTE_DELTA_ENCODING
55 : #include "av1/encoder/palette.h"
56 : #endif // CONFIG_PALETTE && CONFIG_PALETTE_DELTA_ENCODING
57 : #include "av1/encoder/segmentation.h"
58 : #include "av1/encoder/subexp.h"
59 : #include "av1/encoder/tokenize.h"
60 : #if CONFIG_PVQ
61 : #include "av1/encoder/pvq_encoder.h"
62 : #endif
63 :
64 : static struct av1_token intra_mode_encodings[INTRA_MODES];
65 : static struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS];
66 : static struct av1_token partition_encodings[PARTITION_TYPES];
67 : #if CONFIG_EXT_INTER
68 : static const struct av1_token
69 : inter_compound_mode_encodings[INTER_COMPOUND_MODES] = {
70 : { 2, 2 }, { 12, 4 }, { 52, 6 }, { 53, 6 },
71 : { 54, 6 }, { 55, 6 }, { 0, 1 }, { 7, 3 }
72 : };
73 : #endif // CONFIG_EXT_INTER
74 : #if CONFIG_PALETTE
75 : static struct av1_token palette_size_encodings[PALETTE_SIZES];
76 : static struct av1_token palette_color_index_encodings[PALETTE_SIZES]
77 : [PALETTE_COLORS];
78 : #endif // CONFIG_PALETTE
79 :
80 : #if CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
81 0 : static INLINE void write_uniform(aom_writer *w, int n, int v) {
82 0 : const int l = get_unsigned_bits(n);
83 0 : const int m = (1 << l) - n;
84 0 : if (l == 0) return;
85 0 : if (v < m) {
86 0 : aom_write_literal(w, v, l - 1);
87 : } else {
88 0 : aom_write_literal(w, m + ((v - m) >> 1), l - 1);
89 0 : aom_write_literal(w, (v - m) & 1, 1);
90 : }
91 : }
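
/* Non-normative worked example: with n = 5, and assuming get_unsigned_bits(5)
 * returns 3 (the bit width needed to represent n), l = 3 and
 * m = (1 << 3) - 5 = 3. Values v in {0, 1, 2} are written in l - 1 = 2 bits;
 * v in {3, 4} is written as the 2-bit prefix m + ((v - m) >> 1) = 3 followed
 * by the single bit (v - m) & 1. This is the usual near-uniform code for a
 * value in [0, n). */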
92 : #endif // CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
93 :
94 : #if CONFIG_EXT_TX
95 : static struct av1_token ext_tx_inter_encodings[EXT_TX_SETS_INTER][TX_TYPES];
96 : static struct av1_token ext_tx_intra_encodings[EXT_TX_SETS_INTRA][TX_TYPES];
97 : #else
98 : static struct av1_token ext_tx_encodings[TX_TYPES];
99 : #endif // CONFIG_EXT_TX
100 : #if CONFIG_GLOBAL_MOTION
101 : static struct av1_token global_motion_types_encodings[GLOBAL_TRANS_TYPES];
102 : #endif // CONFIG_GLOBAL_MOTION
103 : #if CONFIG_EXT_INTRA
104 : #if CONFIG_INTRA_INTERP
105 : static struct av1_token intra_filter_encodings[INTRA_FILTERS];
106 : #endif // CONFIG_INTRA_INTERP
107 : #endif // CONFIG_EXT_INTRA
108 : #if CONFIG_EXT_INTER
109 : #if CONFIG_INTERINTRA
110 : static struct av1_token interintra_mode_encodings[INTERINTRA_MODES];
111 : #endif
112 : #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
113 : static struct av1_token compound_type_encodings[COMPOUND_TYPES];
114 : #endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
115 : #endif // CONFIG_EXT_INTER
116 : #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
117 : static struct av1_token motion_mode_encodings[MOTION_MODES];
118 : #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
119 : #if CONFIG_LOOP_RESTORATION
120 : static struct av1_token switchable_restore_encodings[RESTORE_SWITCHABLE_TYPES];
121 : #endif // CONFIG_LOOP_RESTORATION
122 : static void write_uncompressed_header(AV1_COMP *cpi,
123 : struct aom_write_bit_buffer *wb);
124 : static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data);
125 : static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
126 : const uint32_t data_size, const uint32_t max_tile_size,
127 : const uint32_t max_tile_col_size,
128 : int *const tile_size_bytes,
129 : int *const tile_col_size_bytes);
130 :
131 0 : void av1_encode_token_init(void) {
132 : #if CONFIG_EXT_TX || CONFIG_PALETTE
133 : int s;
134 : #endif // CONFIG_EXT_TX || CONFIG_PALETTE
135 : #if CONFIG_EXT_TX
136 0 : for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
137 0 : av1_tokens_from_tree(ext_tx_inter_encodings[s], av1_ext_tx_inter_tree[s]);
138 : }
139 0 : for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
140 0 : av1_tokens_from_tree(ext_tx_intra_encodings[s], av1_ext_tx_intra_tree[s]);
141 : }
142 : #else
143 : av1_tokens_from_tree(ext_tx_encodings, av1_ext_tx_tree);
144 : #endif // CONFIG_EXT_TX
145 0 : av1_tokens_from_tree(intra_mode_encodings, av1_intra_mode_tree);
146 0 : av1_tokens_from_tree(switchable_interp_encodings, av1_switchable_interp_tree);
147 0 : av1_tokens_from_tree(partition_encodings, av1_partition_tree);
148 :
149 : #if CONFIG_PALETTE
150 0 : av1_tokens_from_tree(palette_size_encodings, av1_palette_size_tree);
151 0 : for (s = 0; s < PALETTE_SIZES; ++s) {
152 0 : av1_tokens_from_tree(palette_color_index_encodings[s],
153 0 : av1_palette_color_index_tree[s]);
154 : }
155 : #endif // CONFIG_PALETTE
156 :
157 : #if CONFIG_EXT_INTRA && CONFIG_INTRA_INTERP
158 : av1_tokens_from_tree(intra_filter_encodings, av1_intra_filter_tree);
159 : #endif // CONFIG_EXT_INTRA && CONFIG_INTRA_INTERP
160 : #if CONFIG_EXT_INTER
161 : #if CONFIG_INTERINTRA
162 0 : av1_tokens_from_tree(interintra_mode_encodings, av1_interintra_mode_tree);
163 : #endif // CONFIG_INTERINTRA
164 : #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
165 0 : av1_tokens_from_tree(compound_type_encodings, av1_compound_type_tree);
166 : #endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
167 : #endif // CONFIG_EXT_INTER
168 : #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
169 0 : av1_tokens_from_tree(motion_mode_encodings, av1_motion_mode_tree);
170 : #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
171 : #if CONFIG_GLOBAL_MOTION
172 0 : av1_tokens_from_tree(global_motion_types_encodings,
173 : av1_global_motion_types_tree);
174 : #endif // CONFIG_GLOBAL_MOTION
175 : #if CONFIG_LOOP_RESTORATION
176 : av1_tokens_from_tree(switchable_restore_encodings,
177 : av1_switchable_restore_tree);
178 : #endif // CONFIG_LOOP_RESTORATION
179 :
  /* This hack is necessary when CONFIG_DUAL_FILTER is enabled because the five
     SWITCHABLE_FILTERS do not come out in consecutive order (0, 1, 2, 3, 4)
     when doing an in-order traversal of the av1_switchable_interp_tree
     structure. */
183 0 : av1_indices_from_tree(av1_switchable_interp_ind, av1_switchable_interp_inv,
184 : av1_switchable_interp_tree);
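  /* As used throughout this file, the _ind array maps an enum value to its
     position in the in-order traversal (the symbol actually coded), and the
     _inv array is the inverse permutation applied by the decoder; so
     av1_switchable_interp_inv[av1_switchable_interp_ind[f]] == f is assumed
     to hold for every filter f. */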
  /* This hack is necessary because the four TX_TYPES do not come out in
     consecutive order (0, 1, 2, 3) when doing an in-order traversal of the
     av1_ext_tx_tree structure. */
188 : #if CONFIG_EXT_TX
189 0 : for (s = 1; s < EXT_TX_SETS_INTRA; ++s)
190 0 : av1_indices_from_tree(av1_ext_tx_intra_ind[s], av1_ext_tx_intra_inv[s],
191 0 : av1_ext_tx_intra_tree[s]);
192 0 : for (s = 1; s < EXT_TX_SETS_INTER; ++s)
193 0 : av1_indices_from_tree(av1_ext_tx_inter_ind[s], av1_ext_tx_inter_inv[s],
194 0 : av1_ext_tx_inter_tree[s]);
195 : #else
196 : av1_indices_from_tree(av1_ext_tx_ind, av1_ext_tx_inv, av1_ext_tx_tree);
197 : #endif
198 0 : av1_indices_from_tree(av1_intra_mode_ind, av1_intra_mode_inv,
199 : av1_intra_mode_tree);
200 0 : av1_indices_from_tree(av1_inter_mode_ind, av1_inter_mode_inv,
201 : av1_inter_mode_tree);
202 0 : }
203 :
204 0 : static void write_intra_mode_kf(const AV1_COMMON *cm, FRAME_CONTEXT *frame_ctx,
205 : const MODE_INFO *mi, const MODE_INFO *above_mi,
206 : const MODE_INFO *left_mi, int block,
207 : PREDICTION_MODE mode, aom_writer *w) {
208 : #if CONFIG_INTRABC
209 : assert(!is_intrabc_block(&mi->mbmi));
210 : #endif // CONFIG_INTRABC
211 0 : aom_write_symbol(w, av1_intra_mode_ind[mode],
212 : get_y_mode_cdf(frame_ctx, mi, above_mi, left_mi, block),
213 : INTRA_MODES);
214 : (void)cm;
215 0 : }
216 :
217 : #if CONFIG_EXT_INTER && CONFIG_INTERINTRA
218 0 : static void write_interintra_mode(aom_writer *w, INTERINTRA_MODE mode,
219 : const aom_prob *probs) {
220 0 : av1_write_token(w, av1_interintra_mode_tree, probs,
221 0 : &interintra_mode_encodings[mode]);
222 0 : }
223 : #endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
224 :
225 0 : static void write_inter_mode(aom_writer *w, PREDICTION_MODE mode,
226 : FRAME_CONTEXT *ec_ctx, const int16_t mode_ctx) {
227 0 : const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
228 0 : const aom_prob newmv_prob = ec_ctx->newmv_prob[newmv_ctx];
229 :
230 0 : aom_write(w, mode != NEWMV, newmv_prob);
231 :
232 0 : if (mode != NEWMV) {
233 0 : const int16_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
234 0 : const aom_prob zeromv_prob = ec_ctx->zeromv_prob[zeromv_ctx];
235 :
236 0 : if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) {
237 0 : assert(mode == ZEROMV);
238 0 : return;
239 : }
240 :
241 0 : aom_write(w, mode != ZEROMV, zeromv_prob);
242 :
243 0 : if (mode != ZEROMV) {
244 0 : int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
245 : aom_prob refmv_prob;
246 :
247 0 : if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
248 0 : if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
249 0 : if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
250 :
251 0 : refmv_prob = ec_ctx->refmv_prob[refmv_ctx];
252 0 : aom_write(w, mode != NEARESTMV, refmv_prob);
253 : }
254 : }
255 : }
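
/* Illustrative trace (non-normative, read from the code above): an inter mode
 * is sent as a cascade of binary flags. NEARMV, for example, costs three
 * flags: "not NEWMV" (1 with newmv_prob), "not ZEROMV" (1 with zeromv_prob)
 * and "not NEARESTMV" (1 with refmv_prob). ZEROMV stops after the second
 * flag, and needs no flag beyond the first when the ALL_ZERO_FLAG_OFFSET bit
 * of mode_ctx is set. */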
256 :
257 0 : static void write_drl_idx(const AV1_COMMON *cm, const MB_MODE_INFO *mbmi,
258 : const MB_MODE_INFO_EXT *mbmi_ext, aom_writer *w) {
259 0 : uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
260 :
261 0 : assert(mbmi->ref_mv_idx < 3);
262 :
263 : #if CONFIG_EXT_INTER
264 0 : if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV) {
265 : #else
266 : if (mbmi->mode == NEWMV) {
267 : #endif
268 : int idx;
269 0 : for (idx = 0; idx < 2; ++idx) {
270 0 : if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
271 0 : uint8_t drl_ctx =
272 0 : av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
273 0 : aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
274 :
275 0 : aom_write(w, mbmi->ref_mv_idx != idx, drl_prob);
276 0 : if (mbmi->ref_mv_idx == idx) return;
277 : }
278 : }
279 0 : return;
280 : }
281 :
282 0 : if (have_nearmv_in_inter_mode(mbmi->mode)) {
283 : int idx;
284 : // TODO(jingning): Temporary solution to compensate the NEARESTMV offset.
285 0 : for (idx = 1; idx < 3; ++idx) {
286 0 : if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
287 0 : uint8_t drl_ctx =
288 0 : av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
289 0 : aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
290 :
291 0 : aom_write(w, mbmi->ref_mv_idx != (idx - 1), drl_prob);
292 0 : if (mbmi->ref_mv_idx == (idx - 1)) return;
293 : }
294 : }
295 0 : return;
296 : }
297 : }
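
/* Example (non-normative): for a NEWMV block with three candidates in the
 * reference MV stack and ref_mv_idx == 1, the first loop above emits 1
 * ("not index 0") and then 0 ("index 1"), each against the drl_prob of its
 * context. When ref_mv_idx equals the last testable candidate, no
 * terminating 0 is written; the candidate count makes it implicit. */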
298 :
299 : #if CONFIG_EXT_INTER
300 0 : static void write_inter_compound_mode(AV1_COMMON *cm, aom_writer *w,
301 : PREDICTION_MODE mode,
302 : const int16_t mode_ctx) {
303 0 : const aom_prob *const inter_compound_probs =
304 0 : cm->fc->inter_compound_mode_probs[mode_ctx];
305 :
306 0 : assert(is_inter_compound_mode(mode));
307 0 : av1_write_token(w, av1_inter_compound_mode_tree, inter_compound_probs,
308 0 : &inter_compound_mode_encodings[INTER_COMPOUND_OFFSET(mode)]);
309 0 : }
310 : #endif // CONFIG_EXT_INTER
311 :
312 0 : static void encode_unsigned_max(struct aom_write_bit_buffer *wb, int data,
313 : int max) {
314 0 : aom_wb_write_literal(wb, data, get_unsigned_bits(max));
315 0 : }
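
/* E.g. encode_unsigned_max(wb, 180, 255) writes 180 as an 8-bit literal,
 * assuming get_unsigned_bits(255) == 8 (the bit width needed for the
 * maximum value). */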
316 :
317 : #if !CONFIG_EC_ADAPT || \
318 : (CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION || CONFIG_EXT_INTER)
319 0 : static void prob_diff_update(const aom_tree_index *tree,
320 : aom_prob probs[/*n - 1*/],
321 : const unsigned int counts[/*n - 1*/], int n,
322 : int probwt, aom_writer *w) {
323 : int i;
324 : unsigned int branch_ct[32][2];
325 :
326 : // Assuming max number of probabilities <= 32
327 0 : assert(n <= 32);
328 :
329 0 : av1_tree_probs_from_distribution(tree, branch_ct, counts);
330 0 : for (i = 0; i < n - 1; ++i)
331 0 : av1_cond_prob_diff_update(w, &probs[i], branch_ct[i], probwt);
332 0 : }
333 : #endif
334 :
335 : #if CONFIG_EXT_INTER || !CONFIG_EC_ADAPT
336 0 : static int prob_diff_update_savings(const aom_tree_index *tree,
337 : aom_prob probs[/*n - 1*/],
338 : const unsigned int counts[/*n - 1*/], int n,
339 : int probwt) {
340 : int i;
341 : unsigned int branch_ct[32][2];
342 0 : int savings = 0;
343 :
344 : // Assuming max number of probabilities <= 32
345 0 : assert(n <= 32);
346 0 : av1_tree_probs_from_distribution(tree, branch_ct, counts);
347 0 : for (i = 0; i < n - 1; ++i) {
348 0 : savings +=
349 0 : av1_cond_prob_diff_update_savings(&probs[i], branch_ct[i], probwt);
350 : }
351 0 : return savings;
352 : }
353 : #endif // CONFIG_EXT_INTER || !CONFIG_EC_ADAPT
354 :
355 : #if CONFIG_VAR_TX
356 0 : static void write_tx_size_vartx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
357 : const MB_MODE_INFO *mbmi, TX_SIZE tx_size,
358 : int depth, int blk_row, int blk_col,
359 : aom_writer *w) {
360 0 : const int tx_row = blk_row >> 1;
361 0 : const int tx_col = blk_col >> 1;
362 0 : const int max_blocks_high = max_block_high(xd, mbmi->sb_type, 0);
363 0 : const int max_blocks_wide = max_block_wide(xd, mbmi->sb_type, 0);
364 :
365 0 : int ctx = txfm_partition_context(xd->above_txfm_context + blk_col,
366 0 : xd->left_txfm_context + blk_row,
367 0 : mbmi->sb_type, tx_size);
368 :
369 0 : if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
370 :
371 0 : if (depth == MAX_VARTX_DEPTH) {
372 0 : txfm_partition_update(xd->above_txfm_context + blk_col,
373 0 : xd->left_txfm_context + blk_row, tx_size, tx_size);
374 0 : return;
375 : }
376 :
377 0 : if (tx_size == mbmi->inter_tx_size[tx_row][tx_col]) {
378 0 : aom_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
379 0 : txfm_partition_update(xd->above_txfm_context + blk_col,
380 0 : xd->left_txfm_context + blk_row, tx_size, tx_size);
381 : } else {
382 0 : const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
383 0 : const int bsl = tx_size_wide_unit[sub_txs];
384 : int i;
385 :
386 0 : aom_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
387 :
388 0 : if (tx_size == TX_8X8) {
389 0 : txfm_partition_update(xd->above_txfm_context + blk_col,
390 0 : xd->left_txfm_context + blk_row, sub_txs, tx_size);
391 0 : return;
392 : }
393 :
394 0 : assert(bsl > 0);
395 0 : for (i = 0; i < 4; ++i) {
396 0 : int offsetr = blk_row + (i >> 1) * bsl;
397 0 : int offsetc = blk_col + (i & 0x01) * bsl;
398 0 : write_tx_size_vartx(cm, xd, mbmi, sub_txs, depth + 1, offsetr, offsetc,
399 : w);
400 : }
401 : }
402 : }
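
/* Sketch of the signalling above (non-normative): each recursion level writes
 * one bit against txfm_partition_prob[ctx], 0 for "keep this transform size"
 * and 1 for "split". A split TX_8X8 is terminal and is not signalled further;
 * any other split recurses into the four quadrants in raster order via
 * offsetr/offsetc. */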
403 :
404 0 : static void update_txfm_partition_probs(AV1_COMMON *cm, aom_writer *w,
405 : FRAME_COUNTS *counts, int probwt) {
406 : int k;
407 0 : for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
408 0 : av1_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
409 0 : counts->txfm_partition[k], probwt);
410 0 : }
411 : #endif
412 :
413 0 : static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
414 : aom_writer *w) {
415 0 : const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
416 0 : const BLOCK_SIZE bsize = mbmi->sb_type;
417 : #if CONFIG_EC_ADAPT
418 0 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
419 : (void)cm;
420 : #else
421 : FRAME_CONTEXT *ec_ctx = cm->fc;
422 : #endif
423 : // For sub8x8 blocks the tx_size symbol does not need to be sent
424 : #if CONFIG_CB4X4 && (CONFIG_VAR_TX || CONFIG_EXT_TX) && CONFIG_RECT_TX
425 0 : if (bsize > BLOCK_4X4) {
426 : #else
427 : if (bsize >= BLOCK_8X8) {
428 : #endif
429 0 : const TX_SIZE tx_size = mbmi->tx_size;
430 0 : const int is_inter = is_inter_block(mbmi);
431 0 : const int tx_size_ctx = get_tx_size_context(xd);
432 0 : const int tx_size_cat = is_inter ? inter_tx_size_cat_lookup[bsize]
433 0 : : intra_tx_size_cat_lookup[bsize];
434 0 : const TX_SIZE coded_tx_size = txsize_sqr_up_map[tx_size];
435 0 : const int depth = tx_size_to_depth(coded_tx_size);
436 : #if CONFIG_EXT_TX && CONFIG_RECT_TX
437 0 : assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(xd, mbmi)));
438 : #endif // CONFIG_EXT_TX && CONFIG_RECT_TX
439 :
440 0 : aom_write_symbol(w, depth, ec_ctx->tx_size_cdf[tx_size_cat][tx_size_ctx],
441 : tx_size_cat + 2);
442 : #if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
443 : if (is_quarter_tx_allowed(xd, mbmi, is_inter) && tx_size != coded_tx_size)
444 : aom_write(w, tx_size == quarter_txsize_lookup[bsize],
445 : cm->fc->quarter_tx_size_prob);
446 : #endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
447 : }
448 0 : }
449 :
450 0 : static void update_inter_mode_probs(AV1_COMMON *cm, aom_writer *w,
451 : FRAME_COUNTS *counts) {
452 : int i;
453 : #if CONFIG_TILE_GROUPS
454 0 : const int probwt = cm->num_tg;
455 : #else
456 : const int probwt = 1;
457 : #endif
458 0 : for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
459 0 : av1_cond_prob_diff_update(w, &cm->fc->newmv_prob[i], counts->newmv_mode[i],
460 : probwt);
461 0 : for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
462 0 : av1_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
463 0 : counts->zeromv_mode[i], probwt);
464 0 : for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
465 0 : av1_cond_prob_diff_update(w, &cm->fc->refmv_prob[i], counts->refmv_mode[i],
466 : probwt);
467 0 : for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
468 0 : av1_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i],
469 : probwt);
470 0 : }
471 :
472 : #if CONFIG_EXT_INTER
473 0 : static void update_inter_compound_mode_probs(AV1_COMMON *cm, int probwt,
474 : aom_writer *w) {
475 0 : const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
476 0 : av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
477 : int i;
478 0 : int savings = 0;
479 0 : int do_update = 0;
480 0 : for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
481 0 : savings += prob_diff_update_savings(
482 0 : av1_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
483 0 : cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES, probwt);
484 : }
485 0 : do_update = savings > savings_thresh;
486 0 : aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
487 0 : if (do_update) {
488 0 : for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
489 0 : prob_diff_update(
490 0 : av1_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
491 0 : cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES, probwt, w);
492 : }
493 : }
494 0 : }
495 : #endif // CONFIG_EXT_INTER
496 :
497 0 : static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd,
498 : int segment_id, const MODE_INFO *mi, aom_writer *w) {
499 0 : if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
500 0 : return 1;
501 : } else {
502 0 : const int skip = mi->mbmi.skip;
503 0 : aom_write(w, skip, av1_get_skip_prob(cm, xd));
504 0 : return skip;
505 : }
506 : }
507 :
508 : #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
509 0 : static void write_motion_mode(const AV1_COMMON *cm, const MODE_INFO *mi,
510 : aom_writer *w) {
511 0 : const MB_MODE_INFO *mbmi = &mi->mbmi;
512 0 : MOTION_MODE last_motion_mode_allowed = motion_mode_allowed(
513 : #if CONFIG_GLOBAL_MOTION && SEPARATE_GLOBAL_MOTION
514 0 : 0, cm->global_motion,
515 : #endif // CONFIG_GLOBAL_MOTION && SEPARATE_GLOBAL_MOTION
516 : mi);
517 :
518 0 : if (last_motion_mode_allowed == SIMPLE_TRANSLATION) return;
519 : #if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
520 0 : if (last_motion_mode_allowed == OBMC_CAUSAL) {
521 0 : aom_write(w, mbmi->motion_mode == OBMC_CAUSAL,
522 0 : cm->fc->obmc_prob[mbmi->sb_type]);
523 : } else {
524 : #endif // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
525 0 : av1_write_token(w, av1_motion_mode_tree,
526 0 : cm->fc->motion_mode_prob[mbmi->sb_type],
527 0 : &motion_mode_encodings[mbmi->motion_mode]);
528 : #if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
529 : }
530 : #endif // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
531 : }
532 : #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
533 :
534 : #if CONFIG_DELTA_Q
535 0 : static void write_delta_qindex(const AV1_COMMON *cm, const MACROBLOCKD *xd,
536 : int delta_qindex, aom_writer *w) {
537 0 : int sign = delta_qindex < 0;
538 0 : int abs = sign ? -delta_qindex : delta_qindex;
539 : int rem_bits, thr;
540 0 : int smallval = abs < DELTA_Q_SMALL ? 1 : 0;
541 : #if CONFIG_EC_ADAPT
542 0 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
543 : (void)cm;
544 : #else
545 : FRAME_CONTEXT *ec_ctx = cm->fc;
546 : (void)xd;
547 : #endif
548 :
549 0 : aom_write_symbol(w, AOMMIN(abs, DELTA_Q_SMALL), ec_ctx->delta_q_cdf,
550 : DELTA_Q_PROBS + 1);
551 :
552 0 : if (!smallval) {
553 0 : rem_bits = OD_ILOG_NZ(abs - 1) - 1;
554 0 : thr = (1 << rem_bits) + 1;
555 0 : aom_write_literal(w, rem_bits, 3);
556 0 : aom_write_literal(w, abs - thr, rem_bits);
557 : }
558 0 : if (abs > 0) {
559 0 : aom_write_bit(w, sign);
560 : }
561 0 : }
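
/* Worked example, assuming DELTA_Q_SMALL == 3 and OD_ILOG_NZ(9) == 4:
 * delta_qindex = -10 gives sign = 1 and abs = 10, so the symbol 3
 * (DELTA_Q_SMALL) is coded first, then rem_bits = OD_ILOG_NZ(9) - 1 = 3 as a
 * 3-bit literal, then abs - thr = 10 - 9 = 1 in rem_bits bits, and finally
 * the sign bit. Small magnitudes (abs < DELTA_Q_SMALL) need only the symbol
 * plus, when nonzero, the sign bit. */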
562 :
563 : #if !CONFIG_EC_ADAPT
564 : static void update_delta_q_probs(AV1_COMMON *cm, aom_writer *w,
565 : FRAME_COUNTS *counts) {
566 : int k;
567 : #if CONFIG_TILE_GROUPS
568 : const int probwt = cm->num_tg;
569 : #else
570 : const int probwt = 1;
571 : #endif
572 : #if CONFIG_EXT_DELTA_Q
573 : if (!cm->delta_q_present_flag) return;
574 : #endif // CONFIG_EXT_DELTA_Q
575 : for (k = 0; k < DELTA_Q_PROBS; ++k) {
576 : av1_cond_prob_diff_update(w, &cm->fc->delta_q_prob[k], counts->delta_q[k],
577 : probwt);
578 : }
579 : }
580 : #endif // CONFIG_EC_ADAPT
581 :
582 : #if CONFIG_EXT_DELTA_Q
583 0 : static void write_delta_lflevel(const AV1_COMMON *cm, const MACROBLOCKD *xd,
584 : int delta_lflevel, aom_writer *w) {
585 0 : int sign = delta_lflevel < 0;
586 0 : int abs = sign ? -delta_lflevel : delta_lflevel;
587 : int rem_bits, thr;
588 0 : int smallval = abs < DELTA_LF_SMALL ? 1 : 0;
589 : #if CONFIG_EC_ADAPT
590 0 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
591 : (void)cm;
592 : #else
593 : FRAME_CONTEXT *ec_ctx = cm->fc;
594 : (void)xd;
595 : #endif
596 :
597 0 : aom_write_symbol(w, AOMMIN(abs, DELTA_LF_SMALL), ec_ctx->delta_lf_cdf,
598 : DELTA_LF_PROBS + 1);
599 :
600 0 : if (!smallval) {
601 0 : rem_bits = OD_ILOG_NZ(abs - 1) - 1;
602 0 : thr = (1 << rem_bits) + 1;
603 0 : aom_write_literal(w, rem_bits, 3);
604 0 : aom_write_literal(w, abs - thr, rem_bits);
605 : }
606 0 : if (abs > 0) {
607 0 : aom_write_bit(w, sign);
608 : }
609 0 : }
610 :
611 : #if !CONFIG_EC_ADAPT
612 : static void update_delta_lf_probs(AV1_COMMON *cm, aom_writer *w,
613 : FRAME_COUNTS *counts) {
614 : int k;
615 : #if CONFIG_TILE_GROUPS
616 : const int probwt = cm->num_tg;
617 : #else
618 : const int probwt = 1;
619 : #endif
620 : if (!cm->delta_lf_present_flag) return;
621 : for (k = 0; k < DELTA_LF_PROBS; ++k) {
622 : av1_cond_prob_diff_update(w, &cm->fc->delta_lf_prob[k], counts->delta_lf[k],
623 : probwt);
624 : }
625 : }
626 : #endif // CONFIG_EC_ADAPT
627 : #endif // CONFIG_EXT_DELTA_Q
628 : #endif // CONFIG_DELTA_Q
629 :
630 0 : static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
631 : FRAME_COUNTS *counts) {
632 : int k;
633 : #if CONFIG_TILE_GROUPS
634 0 : const int probwt = cm->num_tg;
635 : #else
636 : const int probwt = 1;
637 : #endif
638 0 : for (k = 0; k < SKIP_CONTEXTS; ++k) {
639 0 : av1_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k],
640 : probwt);
641 : }
642 0 : }
643 :
644 : #if !CONFIG_EC_ADAPT
645 : static void update_switchable_interp_probs(AV1_COMMON *cm, aom_writer *w,
646 : FRAME_COUNTS *counts) {
647 : int j;
648 : for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
649 : #if CONFIG_TILE_GROUPS
650 : const int probwt = cm->num_tg;
651 : #else
652 : const int probwt = 1;
653 : #endif
654 : prob_diff_update(
655 : av1_switchable_interp_tree, cm->fc->switchable_interp_prob[j],
656 : counts->switchable_interp[j], SWITCHABLE_FILTERS, probwt, w);
657 : }
658 : }
659 : #endif
660 :
661 : #if !CONFIG_EC_ADAPT
662 : #if CONFIG_EXT_TX
663 : static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
664 : const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
665 : av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
666 : int i, j;
667 : int s;
668 : #if CONFIG_TILE_GROUPS
669 : const int probwt = cm->num_tg;
670 : #else
671 : const int probwt = 1;
672 : #endif
673 : for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
674 : int savings = 0;
675 : int do_update = 0;
676 : for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
677 : if (!use_inter_ext_tx_for_txsize[s][i]) continue;
678 : savings += prob_diff_update_savings(
679 : av1_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
680 : cm->counts.inter_ext_tx[s][i],
681 : num_ext_tx_set[ext_tx_set_type_inter[s]], probwt);
682 : }
683 : do_update = savings > savings_thresh;
684 : aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
685 : if (do_update) {
686 : for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
687 : if (!use_inter_ext_tx_for_txsize[s][i]) continue;
688 : prob_diff_update(av1_ext_tx_inter_tree[s],
689 : cm->fc->inter_ext_tx_prob[s][i],
690 : cm->counts.inter_ext_tx[s][i],
691 : num_ext_tx_set[ext_tx_set_type_inter[s]], probwt, w);
692 : }
693 : }
694 : }
695 :
696 : for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
697 : int savings = 0;
698 : int do_update = 0;
699 : for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
700 : if (!use_intra_ext_tx_for_txsize[s][i]) continue;
701 : for (j = 0; j < INTRA_MODES; ++j)
702 : savings += prob_diff_update_savings(
703 : av1_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
704 : cm->counts.intra_ext_tx[s][i][j],
705 : num_ext_tx_set[ext_tx_set_type_intra[s]], probwt);
706 : }
707 : do_update = savings > savings_thresh;
708 : aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
709 : if (do_update) {
710 : for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
711 : if (!use_intra_ext_tx_for_txsize[s][i]) continue;
712 : for (j = 0; j < INTRA_MODES; ++j)
713 : prob_diff_update(av1_ext_tx_intra_tree[s],
714 : cm->fc->intra_ext_tx_prob[s][i][j],
715 : cm->counts.intra_ext_tx[s][i][j],
716 : num_ext_tx_set[ext_tx_set_type_intra[s]], probwt, w);
717 : }
718 : }
719 : }
720 : }
721 :
722 : #else
723 : static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
724 : const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
725 : av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
726 : int i, j;
727 :
728 : int savings = 0;
729 : int do_update = 0;
730 : #if CONFIG_TILE_GROUPS
731 : const int probwt = cm->num_tg;
732 : #else
733 : const int probwt = 1;
734 : #endif
735 : for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
736 : for (j = 0; j < TX_TYPES; ++j)
737 : savings += prob_diff_update_savings(
738 : av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
739 : cm->counts.intra_ext_tx[i][j], TX_TYPES, probwt);
740 : }
741 : do_update = savings > savings_thresh;
742 : aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
743 : if (do_update) {
744 : for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
745 : for (j = 0; j < TX_TYPES; ++j) {
746 : prob_diff_update(av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
747 : cm->counts.intra_ext_tx[i][j], TX_TYPES, probwt, w);
748 : }
749 : }
750 : }
751 :
752 : savings = 0;
753 : for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
754 : savings +=
755 : prob_diff_update_savings(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
756 : cm->counts.inter_ext_tx[i], TX_TYPES, probwt);
757 : }
758 : do_update = savings > savings_thresh;
759 : aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
760 : if (do_update) {
761 : for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
762 : prob_diff_update(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
763 : cm->counts.inter_ext_tx[i], TX_TYPES, probwt, w);
764 : }
765 : }
766 : }
767 : #endif // CONFIG_EXT_TX
768 : #endif // !CONFIG_EC_ADAPT
769 : #if CONFIG_PALETTE
770 0 : static void pack_palette_tokens(aom_writer *w, const TOKENEXTRA **tp, int n,
771 : int num) {
772 : int i;
773 0 : const TOKENEXTRA *p = *tp;
774 :
775 0 : for (i = 0; i < num; ++i) {
776 0 : av1_write_token(
777 0 : w, av1_palette_color_index_tree[n - PALETTE_MIN_SIZE], p->context_tree,
778 0 : &palette_color_index_encodings[n - PALETTE_MIN_SIZE][p->token]);
779 0 : ++p;
780 : }
781 :
782 0 : *tp = p;
783 0 : }
784 : #endif // CONFIG_PALETTE
785 :
786 : #if !CONFIG_PVQ
787 : #if CONFIG_SUPERTX
788 : static void update_supertx_probs(AV1_COMMON *cm, int probwt, aom_writer *w) {
789 : const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
790 : av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
791 : int i, j;
792 : int savings = 0;
793 : int do_update = 0;
794 : for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
795 : for (j = TX_8X8; j < TX_SIZES; ++j) {
796 : savings += av1_cond_prob_diff_update_savings(
797 : &cm->fc->supertx_prob[i][j], cm->counts.supertx[i][j], probwt);
798 : }
799 : }
800 : do_update = savings > savings_thresh;
801 : aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
802 : if (do_update) {
803 : for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
804 : for (j = TX_8X8; j < TX_SIZES; ++j) {
805 : av1_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
806 : cm->counts.supertx[i][j], probwt);
807 : }
808 : }
809 : }
810 : }
811 : #endif // CONFIG_SUPERTX
812 :
813 : #if CONFIG_NEW_MULTISYMBOL
814 : static INLINE void write_coeff_extra(const aom_cdf_prob *const *cdf, int val,
815 : int n, aom_writer *w) {
816 : // Code the extra bits from LSB to MSB in groups of 4
817 : int i = 0;
818 : int count = 0;
819 : while (count < n) {
820 : const int size = AOMMIN(n - count, 4);
821 : const int mask = (1 << size) - 1;
822 : aom_write_cdf(w, val & mask, cdf[i++], 1 << size);
823 : val >>= size;
824 : count += size;
825 : }
826 : }
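
/* Non-normative sketch of the grouping: for val = 0x2f and n = 6 the loop
 * above codes 0xf against a 16-ary cdf (the four LSBs) and then 0x2 against
 * a 4-ary cdf (the remaining two bits). */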
827 : #else
828 0 : static INLINE void write_coeff_extra(const aom_prob *pb, int value,
829 : int num_bits, int skip_bits, aom_writer *w,
830 : TOKEN_STATS *token_stats) {
831 : // Code the extra bits from MSB to LSB 1 bit at a time
832 : int index;
833 0 : for (index = skip_bits; index < num_bits; ++index) {
834 0 : const int shift = num_bits - index - 1;
835 0 : const int bb = (value >> shift) & 1;
836 0 : aom_write_record(w, bb, pb[index], token_stats);
837 : }
838 0 : }
839 : #endif
840 :
841 : #if !CONFIG_LV_MAP
842 0 : static void pack_mb_tokens(aom_writer *w, const TOKENEXTRA **tp,
843 : const TOKENEXTRA *const stop,
844 : aom_bit_depth_t bit_depth, const TX_SIZE tx_size,
845 : TOKEN_STATS *token_stats) {
846 0 : const TOKENEXTRA *p = *tp;
847 : #if CONFIG_VAR_TX
848 0 : int count = 0;
849 0 : const int seg_eob = tx_size_2d[tx_size];
850 : #endif
851 :
852 0 : while (p < stop && p->token != EOSB_TOKEN) {
853 0 : const int token = p->token;
854 0 : const int eob_val = p->eob_val;
855 0 : if (token == BLOCK_Z_TOKEN) {
856 0 : aom_write_symbol(w, 0, *p->head_cdf, HEAD_TOKENS + 1);
857 0 : p++;
858 : #if CONFIG_VAR_TX
859 0 : break;
860 : #endif
861 : continue;
862 : }
863 :
864 0 : const av1_extra_bit *const extra_bits = &av1_extra_bits[token];
865 0 : if (eob_val == LAST_EOB) {
866 : // Just code a flag indicating whether the value is >1 or 1.
867 0 : aom_write_bit(w, token != ONE_TOKEN);
868 : } else {
869 0 : int comb_symb = 2 * AOMMIN(token, TWO_TOKEN) - eob_val + p->first_val;
870 0 : aom_write_symbol(w, comb_symb, *p->head_cdf, HEAD_TOKENS + p->first_val);
871 : }
872 0 : if (token > ONE_TOKEN) {
873 0 : aom_write_symbol(w, token - TWO_TOKEN, *p->tail_cdf, TAIL_TOKENS);
874 : }
875 :
876 0 : if (extra_bits->base_val) {
877 0 : const int bit_string = p->extra;
      // Length of the extra bits to be written, excluding the sign bit.
      const int bit_string_length = extra_bits->len;
      const int is_cat6 = (extra_bits->base_val == CAT6_MIN_VAL);
882 0 : int skip_bits = is_cat6
883 : ? (int)sizeof(av1_cat6_prob) -
884 0 : av1_get_cat6_extrabits_size(tx_size, bit_depth)
885 0 : : 0;
886 :
887 0 : assert(!(bit_string >> (bit_string_length - skip_bits + 1)));
888 0 : if (bit_string_length > 0)
889 : #if CONFIG_NEW_MULTISYMBOL
890 : write_coeff_extra(extra_bits->cdf, bit_string >> 1,
891 : bit_string_length - skip_bits, w);
892 : #else
893 0 : write_coeff_extra(extra_bits->prob, bit_string >> 1, bit_string_length,
894 : skip_bits, w, token_stats);
895 : #endif
896 :
897 0 : aom_write_bit_record(w, bit_string & 1, token_stats);
898 : }
899 0 : ++p;
900 :
901 : #if CONFIG_VAR_TX
902 0 : ++count;
903 0 : if (eob_val == EARLY_EOB || count == seg_eob) break;
904 : #endif
905 : }
906 :
907 0 : *tp = p;
908 0 : }
909 : #endif // !CONFIG_LV_MAP
910 : #else // !CONFIG_PVQ
911 : static PVQ_INFO *get_pvq_block(PVQ_QUEUE *pvq_q) {
912 : PVQ_INFO *pvq;
913 :
914 : assert(pvq_q->curr_pos <= pvq_q->last_pos);
915 : assert(pvq_q->curr_pos < pvq_q->buf_len);
916 :
917 : pvq = pvq_q->buf + pvq_q->curr_pos;
918 : ++pvq_q->curr_pos;
919 :
920 : return pvq;
921 : }
922 :
923 : static void pack_pvq_tokens(aom_writer *w, MACROBLOCK *const x,
924 : MACROBLOCKD *const xd, int plane, BLOCK_SIZE bsize,
925 : const TX_SIZE tx_size) {
926 : PVQ_INFO *pvq;
927 : int idx, idy;
928 : const struct macroblockd_plane *const pd = &xd->plane[plane];
929 : od_adapt_ctx *adapt;
930 : int max_blocks_wide;
931 : int max_blocks_high;
932 : int step = (1 << tx_size);
933 : const BLOCK_SIZE plane_bsize =
934 : get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
935 :
936 : adapt = x->daala_enc.state.adapt;
937 :
938 : max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
939 : max_blocks_high = max_block_high(xd, plane_bsize, plane);
940 :
941 : for (idy = 0; idy < max_blocks_high; idy += step) {
942 : for (idx = 0; idx < max_blocks_wide; idx += step) {
943 : const int is_keyframe = 0;
944 : const int encode_flip = 0;
945 : const int flip = 0;
946 : int i;
947 : const int has_dc_skip = 1;
948 : int *exg = &adapt->pvq.pvq_exg[plane][tx_size][0];
949 : int *ext = adapt->pvq.pvq_ext + tx_size * PVQ_MAX_PARTITIONS;
950 : generic_encoder *model = adapt->pvq.pvq_param_model;
951 :
952 : pvq = get_pvq_block(x->pvq_q);
953 :
954 : // encode block skip info
955 : aom_write_symbol(w, pvq->ac_dc_coded,
956 : adapt->skip_cdf[2 * tx_size + (plane != 0)], 4);
957 :
958 : // AC coeffs coded?
959 : if (pvq->ac_dc_coded & AC_CODED) {
960 : assert(pvq->bs == tx_size);
961 : for (i = 0; i < pvq->nb_bands; i++) {
962 : if (i == 0 ||
963 : (!pvq->skip_rest && !(pvq->skip_dir & (1 << ((i - 1) % 3))))) {
964 : pvq_encode_partition(
965 : w, pvq->qg[i], pvq->theta[i], pvq->y + pvq->off[i],
966 : pvq->size[i], pvq->k[i], model, adapt, exg + i, ext + i,
967 : (plane != 0) * OD_TXSIZES * PVQ_MAX_PARTITIONS +
968 : pvq->bs * PVQ_MAX_PARTITIONS + i,
969 : is_keyframe, i == 0 && (i < pvq->nb_bands - 1), pvq->skip_rest,
970 : encode_flip, flip);
971 : }
972 : if (i == 0 && !pvq->skip_rest && pvq->bs > 0) {
973 : aom_write_symbol(
974 : w, pvq->skip_dir,
975 : &adapt->pvq
976 : .pvq_skip_dir_cdf[(plane != 0) + 2 * (pvq->bs - 1)][0],
977 : 7);
978 : }
979 : }
980 : }
      // Encode the residue of the DC coefficient, if it exists.
982 : if (!has_dc_skip || (pvq->ac_dc_coded & DC_CODED)) {
983 : generic_encode(w, &adapt->model_dc[plane],
984 : abs(pvq->dq_dc_residue) - has_dc_skip,
985 : &adapt->ex_dc[plane][pvq->bs][0], 2);
986 : }
987 : if ((pvq->ac_dc_coded & DC_CODED)) {
988 : aom_write_bit(w, pvq->dq_dc_residue < 0);
989 : }
990 : }
991 : } // for (idy = 0;
992 : }
#endif  // !CONFIG_PVQ
994 :
995 : #if CONFIG_VAR_TX && !CONFIG_COEF_INTERLEAVE
996 : #if CONFIG_LV_MAP
997 : static void pack_txb_tokens(aom_writer *w,
998 : #if CONFIG_LV_MAP
999 : AV1_COMMON *cm,
1000 : #endif // CONFIG_LV_MAP
1001 : const TOKENEXTRA **tp,
1002 : const TOKENEXTRA *const tok_end,
1003 : #if CONFIG_PVQ || CONFIG_LV_MAP
1004 : MACROBLOCK *const x,
1005 : #endif
1006 : MACROBLOCKD *xd, MB_MODE_INFO *mbmi, int plane,
1007 : BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
1008 : int block, int blk_row, int blk_col,
1009 : TX_SIZE tx_size, TOKEN_STATS *token_stats) {
1010 : const struct macroblockd_plane *const pd = &xd->plane[plane];
1011 : const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
1012 : const int tx_row = blk_row >> (1 - pd->subsampling_y);
1013 : const int tx_col = blk_col >> (1 - pd->subsampling_x);
1014 : TX_SIZE plane_tx_size;
1015 : const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
1016 : const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
1017 :
1018 : if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
1019 :
1020 : plane_tx_size =
1021 : plane ? uv_txsize_lookup[bsize][mbmi->inter_tx_size[tx_row][tx_col]][0][0]
1022 : : mbmi->inter_tx_size[tx_row][tx_col];
1023 :
1024 : if (tx_size == plane_tx_size) {
1025 : TOKEN_STATS tmp_token_stats;
1026 : init_token_stats(&tmp_token_stats);
1027 :
1028 : #if !CONFIG_PVQ
1029 : tran_low_t *tcoeff = BLOCK_OFFSET(x->mbmi_ext->tcoeff[plane], block);
1030 : uint16_t eob = x->mbmi_ext->eobs[plane][block];
1031 : TXB_CTX txb_ctx = { x->mbmi_ext->txb_skip_ctx[plane][block],
1032 : x->mbmi_ext->dc_sign_ctx[plane][block] };
1033 : av1_write_coeffs_txb(cm, xd, w, block, plane, tcoeff, eob, &txb_ctx);
1034 : #else
1035 : pack_pvq_tokens(w, x, xd, plane, bsize, tx_size);
1036 : #endif
1037 : #if CONFIG_RD_DEBUG
1038 : token_stats->txb_coeff_cost_map[blk_row][blk_col] = tmp_token_stats.cost;
1039 : token_stats->cost += tmp_token_stats.cost;
1040 : #endif
1041 : } else {
1042 : const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
1043 : const int bsl = tx_size_wide_unit[sub_txs];
1044 : int i;
1045 :
1046 : assert(bsl > 0);
1047 :
1048 : for (i = 0; i < 4; ++i) {
1049 : const int offsetr = blk_row + (i >> 1) * bsl;
1050 : const int offsetc = blk_col + (i & 0x01) * bsl;
1051 : const int step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
1052 :
1053 : if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
1054 :
1055 : pack_txb_tokens(w,
1056 : #if CONFIG_LV_MAP
1057 : cm,
1058 : #endif
1059 : tp, tok_end,
1060 : #if CONFIG_PVQ || CONFIG_LV_MAP
1061 : x,
1062 : #endif
1063 : xd, mbmi, plane, plane_bsize, bit_depth, block, offsetr,
1064 : offsetc, sub_txs, token_stats);
1065 : block += step;
1066 : }
1067 : }
1068 : }
1069 : #else // CONFIG_LV_MAP
1070 0 : static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
1071 : const TOKENEXTRA *const tok_end,
1072 : #if CONFIG_PVQ
1073 : MACROBLOCK *const x,
1074 : #endif
1075 : MACROBLOCKD *xd, MB_MODE_INFO *mbmi, int plane,
1076 : BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
1077 : int block, int blk_row, int blk_col,
1078 : TX_SIZE tx_size, TOKEN_STATS *token_stats) {
1079 0 : const struct macroblockd_plane *const pd = &xd->plane[plane];
1080 0 : const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
1081 0 : const int tx_row = blk_row >> (1 - pd->subsampling_y);
1082 0 : const int tx_col = blk_col >> (1 - pd->subsampling_x);
1083 : TX_SIZE plane_tx_size;
1084 0 : const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
1085 0 : const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
1086 :
1087 0 : if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
1088 :
1089 0 : plane_tx_size =
1090 0 : plane ? uv_txsize_lookup[bsize][mbmi->inter_tx_size[tx_row][tx_col]][0][0]
1091 0 : : mbmi->inter_tx_size[tx_row][tx_col];
1092 :
1093 0 : if (tx_size == plane_tx_size) {
1094 : TOKEN_STATS tmp_token_stats;
1095 0 : init_token_stats(&tmp_token_stats);
1096 : #if !CONFIG_PVQ
1097 0 : pack_mb_tokens(w, tp, tok_end, bit_depth, tx_size, &tmp_token_stats);
1098 : #else
1099 : pack_pvq_tokens(w, x, xd, plane, bsize, tx_size);
1100 : #endif
1101 : #if CONFIG_RD_DEBUG
1102 : token_stats->txb_coeff_cost_map[blk_row][blk_col] = tmp_token_stats.cost;
1103 : token_stats->cost += tmp_token_stats.cost;
1104 : #endif
1105 : } else {
1106 0 : const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
1107 0 : const int bsl = tx_size_wide_unit[sub_txs];
1108 : int i;
1109 :
1110 0 : assert(bsl > 0);
1111 :
1112 0 : for (i = 0; i < 4; ++i) {
1113 0 : const int offsetr = blk_row + (i >> 1) * bsl;
1114 0 : const int offsetc = blk_col + (i & 0x01) * bsl;
1115 0 : const int step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
1116 :
1117 0 : if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
1118 :
1119 0 : pack_txb_tokens(w, tp, tok_end,
1120 : #if CONFIG_PVQ
1121 : x,
1122 : #endif
1123 : xd, mbmi, plane, plane_bsize, bit_depth, block, offsetr,
1124 : offsetc, sub_txs, token_stats);
1125 0 : block += step;
1126 : }
1127 : }
1128 : }
1129 : #endif // CONFIG_LV_MAP
1130 : #endif // CONFIG_VAR_TX
1131 :
1132 0 : static void write_segment_id(aom_writer *w, const struct segmentation *seg,
1133 : struct segmentation_probs *segp, int segment_id) {
1134 0 : if (seg->enabled && seg->update_map) {
1135 0 : aom_write_symbol(w, segment_id, segp->tree_cdf, MAX_SEGMENTS);
1136 : }
1137 0 : }
1138 :
1139 : // This function encodes the reference frame
1140 0 : static void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd,
1141 : aom_writer *w) {
1142 0 : const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
1143 0 : const int is_compound = has_second_ref(mbmi);
1144 0 : const int segment_id = mbmi->segment_id;
1145 :
1146 : // If segment level coding of this signal is disabled...
1147 : // or the segment allows multiple reference frame options
1148 0 : if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
1149 0 : assert(!is_compound);
1150 0 : assert(mbmi->ref_frame[0] ==
1151 : get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
1152 : } else {
1153 : // does the feature use compound prediction or not
1154 : // (if not specified at the frame/segment level)
1155 0 : if (cm->reference_mode == REFERENCE_MODE_SELECT) {
1156 : #if SUB8X8_COMP_REF
1157 : aom_write(w, is_compound, av1_get_reference_mode_prob(cm, xd));
1158 : #else
1159 0 : if (mbmi->sb_type != BLOCK_4X4)
1160 0 : aom_write(w, is_compound, av1_get_reference_mode_prob(cm, xd));
1161 : #endif
1162 : } else {
1163 0 : assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
1164 : }
1165 :
1166 0 : if (is_compound) {
1167 : #if CONFIG_EXT_REFS
1168 0 : const int bit = (mbmi->ref_frame[0] == GOLDEN_FRAME ||
1169 0 : mbmi->ref_frame[0] == LAST3_FRAME);
1170 0 : const int bit_bwd = mbmi->ref_frame[1] == ALTREF_FRAME;
1171 : #else // CONFIG_EXT_REFS
1172 : const int bit = mbmi->ref_frame[0] == GOLDEN_FRAME;
1173 : #endif // CONFIG_EXT_REFS
1174 :
1175 0 : aom_write(w, bit, av1_get_pred_prob_comp_ref_p(cm, xd));
1176 :
1177 : #if CONFIG_EXT_REFS
1178 0 : if (!bit) {
1179 0 : const int bit1 = mbmi->ref_frame[0] == LAST_FRAME;
1180 0 : aom_write(w, bit1, av1_get_pred_prob_comp_ref_p1(cm, xd));
1181 : } else {
1182 0 : const int bit2 = mbmi->ref_frame[0] == GOLDEN_FRAME;
1183 0 : aom_write(w, bit2, av1_get_pred_prob_comp_ref_p2(cm, xd));
1184 : }
1185 0 : aom_write(w, bit_bwd, av1_get_pred_prob_comp_bwdref_p(cm, xd));
1186 : #endif // CONFIG_EXT_REFS
1187 : } else {
1188 : #if CONFIG_EXT_REFS
1189 0 : const int bit0 = (mbmi->ref_frame[0] == ALTREF_FRAME ||
1190 0 : mbmi->ref_frame[0] == BWDREF_FRAME);
1191 0 : aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
1192 :
1193 0 : if (bit0) {
1194 0 : const int bit1 = mbmi->ref_frame[0] == ALTREF_FRAME;
1195 0 : aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
1196 : } else {
1197 0 : const int bit2 = (mbmi->ref_frame[0] == LAST3_FRAME ||
1198 0 : mbmi->ref_frame[0] == GOLDEN_FRAME);
1199 0 : aom_write(w, bit2, av1_get_pred_prob_single_ref_p3(cm, xd));
1200 :
1201 0 : if (!bit2) {
1202 0 : const int bit3 = mbmi->ref_frame[0] != LAST_FRAME;
1203 0 : aom_write(w, bit3, av1_get_pred_prob_single_ref_p4(cm, xd));
1204 : } else {
1205 0 : const int bit4 = mbmi->ref_frame[0] != LAST3_FRAME;
1206 0 : aom_write(w, bit4, av1_get_pred_prob_single_ref_p5(cm, xd));
1207 : }
1208 : }
1209 : #else // CONFIG_EXT_REFS
1210 : const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
1211 : aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
1212 :
1213 : if (bit0) {
1214 : const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
1215 : aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
1216 : }
1217 : #endif // CONFIG_EXT_REFS
1218 : }
1219 : }
1220 0 : }
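
/* Reading the single-reference branch above (the CONFIG_EXT_REFS path) as a
 * decision tree, purely as a summary of the code:
 *   bit0: 1 -> {BWDREF, ALTREF}, 0 -> {LAST, LAST2, LAST3, GOLDEN}
 *   bit1 (after bit0 == 1): 1 -> ALTREF, 0 -> BWDREF
 *   bit2 (after bit0 == 0): 1 -> {LAST3, GOLDEN}, 0 -> {LAST, LAST2}
 *   bit3 (after bit2 == 0): 1 -> LAST2, 0 -> LAST
 *   bit4 (after bit2 == 1): 1 -> GOLDEN, 0 -> LAST3 */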
1221 :
1222 : #if CONFIG_FILTER_INTRA
1223 : static void write_filter_intra_mode_info(const AV1_COMMON *const cm,
1224 : const MACROBLOCKD *xd,
1225 : const MB_MODE_INFO *const mbmi,
1226 : int mi_row, int mi_col,
1227 : aom_writer *w) {
1228 : if (mbmi->mode == DC_PRED
1229 : #if CONFIG_PALETTE
1230 : && mbmi->palette_mode_info.palette_size[0] == 0
1231 : #endif // CONFIG_PALETTE
1232 : ) {
1233 : aom_write(w, mbmi->filter_intra_mode_info.use_filter_intra_mode[0],
1234 : cm->fc->filter_intra_probs[0]);
1235 : if (mbmi->filter_intra_mode_info.use_filter_intra_mode[0]) {
1236 : const FILTER_INTRA_MODE mode =
1237 : mbmi->filter_intra_mode_info.filter_intra_mode[0];
1238 : write_uniform(w, FILTER_INTRA_MODES, mode);
1239 : }
1240 : }
1241 :
1242 : #if CONFIG_CB4X4
1243 : if (!is_chroma_reference(mi_row, mi_col, mbmi->sb_type,
1244 : xd->plane[1].subsampling_x,
1245 : xd->plane[1].subsampling_y))
1246 : return;
1247 : #else
1248 : (void)xd;
1249 : (void)mi_row;
1250 : (void)mi_col;
1251 : #endif // CONFIG_CB4X4
1252 :
1253 : if (mbmi->uv_mode == DC_PRED
1254 : #if CONFIG_PALETTE
1255 : && mbmi->palette_mode_info.palette_size[1] == 0
1256 : #endif // CONFIG_PALETTE
1257 : ) {
1258 : aom_write(w, mbmi->filter_intra_mode_info.use_filter_intra_mode[1],
1259 : cm->fc->filter_intra_probs[1]);
1260 : if (mbmi->filter_intra_mode_info.use_filter_intra_mode[1]) {
1261 : const FILTER_INTRA_MODE mode =
1262 : mbmi->filter_intra_mode_info.filter_intra_mode[1];
1263 : write_uniform(w, FILTER_INTRA_MODES, mode);
1264 : }
1265 : }
1266 : }
1267 : #endif // CONFIG_FILTER_INTRA
1268 :
1269 : #if CONFIG_EXT_INTRA
1270 0 : static void write_intra_angle_info(const MACROBLOCKD *xd,
1271 : FRAME_CONTEXT *const ec_ctx, aom_writer *w) {
1272 0 : const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
1273 0 : const BLOCK_SIZE bsize = mbmi->sb_type;
1274 : #if CONFIG_INTRA_INTERP
1275 : const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
1276 : int p_angle;
1277 : #endif // CONFIG_INTRA_INTERP
1278 :
1279 : (void)ec_ctx;
1280 0 : if (bsize < BLOCK_8X8) return;
1281 :
1282 0 : if (av1_is_directional_mode(mbmi->mode, bsize)) {
1283 0 : write_uniform(w, 2 * MAX_ANGLE_DELTA + 1,
1284 0 : MAX_ANGLE_DELTA + mbmi->angle_delta[0]);
1285 : #if CONFIG_INTRA_INTERP
1286 : p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
1287 : if (av1_is_intra_filter_switchable(p_angle)) {
1288 : aom_write_symbol(w, mbmi->intra_filter,
1289 : ec_ctx->intra_filter_cdf[intra_filter_ctx],
1290 : INTRA_FILTERS);
1291 : }
1292 : #endif // CONFIG_INTRA_INTERP
1293 : }
1294 :
1295 0 : if (av1_is_directional_mode(mbmi->uv_mode, bsize)) {
1296 0 : write_uniform(w, 2 * MAX_ANGLE_DELTA + 1,
1297 0 : MAX_ANGLE_DELTA + mbmi->angle_delta[1]);
1298 : }
1299 : }
1300 : #endif // CONFIG_EXT_INTRA
1301 :
1302 0 : static void write_mb_interp_filter(AV1_COMP *cpi, const MACROBLOCKD *xd,
1303 : aom_writer *w) {
1304 0 : AV1_COMMON *const cm = &cpi->common;
1305 0 : const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
1306 : #if CONFIG_EC_ADAPT
1307 0 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
1308 : #else
1309 : FRAME_CONTEXT *ec_ctx = cm->fc;
1310 : #endif
1311 :
1312 0 : if (!av1_is_interp_needed(xd)) {
1313 : #if CONFIG_DUAL_FILTER
1314 0 : for (int i = 0; i < 4; ++i)
1315 0 : assert(mbmi->interp_filter[i] == (cm->interp_filter == SWITCHABLE
1316 : ? EIGHTTAP_REGULAR
1317 : : cm->interp_filter));
1318 : #else
1319 : assert(mbmi->interp_filter == (cm->interp_filter == SWITCHABLE
1320 : ? EIGHTTAP_REGULAR
1321 : : cm->interp_filter));
1322 : #endif // CONFIG_DUAL_FILTER
1323 0 : return;
1324 : }
1325 0 : if (cm->interp_filter == SWITCHABLE) {
1326 : #if CONFIG_DUAL_FILTER
1327 : int dir;
1328 0 : for (dir = 0; dir < 2; ++dir) {
1329 0 : if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
1330 0 : (mbmi->ref_frame[1] > INTRA_FRAME &&
1331 0 : has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
1332 0 : const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
1333 0 : aom_write_symbol(w, av1_switchable_interp_ind[mbmi->interp_filter[dir]],
1334 0 : ec_ctx->switchable_interp_cdf[ctx],
1335 : SWITCHABLE_FILTERS);
1336 0 : ++cpi->interp_filter_selected[0][mbmi->interp_filter[dir]];
1337 : } else {
1338 0 : assert(mbmi->interp_filter[dir] == EIGHTTAP_REGULAR);
1339 : }
1340 : }
1341 : #else
1342 : {
1343 : const int ctx = av1_get_pred_context_switchable_interp(xd);
1344 : aom_write_symbol(w, av1_switchable_interp_ind[mbmi->interp_filter],
1345 : ec_ctx->switchable_interp_cdf[ctx], SWITCHABLE_FILTERS);
1346 : ++cpi->interp_filter_selected[0][mbmi->interp_filter];
1347 : }
1348 : #endif // CONFIG_DUAL_FILTER
1349 : }
1350 : }
1351 :
1352 : #if CONFIG_PALETTE
1353 : #if CONFIG_PALETTE_DELTA_ENCODING
// Transmit color values with delta encoding. Write the first value as a
// literal and the deltas between each value and the previous one. "min_val"
// is the smallest possible value of the deltas.
1357 : static void delta_encode_palette_colors(const int *colors, int num,
1358 : int bit_depth, int min_val,
1359 : aom_writer *w) {
1360 : if (num <= 0) return;
1361 : assert(colors[0] < (1 << bit_depth));
1362 : aom_write_literal(w, colors[0], bit_depth);
1363 : if (num == 1) return;
1364 : int max_delta = 0;
1365 : int deltas[PALETTE_MAX_SIZE];
1366 : memset(deltas, 0, sizeof(deltas));
1367 : for (int i = 1; i < num; ++i) {
1368 : assert(colors[i] < (1 << bit_depth));
1369 : const int delta = colors[i] - colors[i - 1];
1370 : deltas[i - 1] = delta;
1371 : assert(delta >= min_val);
1372 : if (delta > max_delta) max_delta = delta;
1373 : }
1374 : const int min_bits = bit_depth - 3;
1375 : int bits = AOMMAX(av1_ceil_log2(max_delta + 1 - min_val), min_bits);
1376 : assert(bits <= bit_depth);
1377 : int range = (1 << bit_depth) - colors[0] - min_val;
1378 : aom_write_literal(w, bits - min_bits, 2);
1379 : for (int i = 0; i < num - 1; ++i) {
1380 : aom_write_literal(w, deltas[i] - min_val, bits);
1381 : range -= deltas[i];
1382 : bits = AOMMIN(bits, av1_ceil_log2(range));
1383 : }
1384 : }
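
// Worked example (assuming av1_ceil_log2(x) returns the smallest k with
// (1 << k) >= x): colors = {10, 14, 21}, bit_depth = 8, min_val = 1. The
// function writes 10 as an 8-bit literal, computes deltas {4, 7} so
// max_delta = 7, takes bits = AOMMAX(av1_ceil_log2(7), 8 - 3) = 5, signals
// bits - min_bits = 0 in 2 bits, then writes 4 - 1 = 3 and 7 - 1 = 6 as
// 5-bit literals, tightening "bits" from the remaining range after each
// delta.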
1385 :
1386 : // Transmit luma palette color values. First signal if each color in the color
1387 : // cache is used. Those colors that are not in the cache are transmitted with
1388 : // delta encoding.
1389 : static void write_palette_colors_y(const MACROBLOCKD *const xd,
1390 : const PALETTE_MODE_INFO *const pmi,
1391 : int bit_depth, aom_writer *w) {
1392 : const int n = pmi->palette_size[0];
1393 : const MODE_INFO *const above_mi = xd->above_mi;
1394 : const MODE_INFO *const left_mi = xd->left_mi;
1395 : uint16_t color_cache[2 * PALETTE_MAX_SIZE];
1396 : const int n_cache = av1_get_palette_cache(above_mi, left_mi, 0, color_cache);
1397 : int out_cache_colors[PALETTE_MAX_SIZE];
1398 : uint8_t cache_color_found[2 * PALETTE_MAX_SIZE];
1399 : const int n_out_cache =
1400 : av1_index_color_cache(color_cache, n_cache, pmi->palette_colors, n,
1401 : cache_color_found, out_cache_colors);
1402 : int n_in_cache = 0;
1403 : for (int i = 0; i < n_cache && n_in_cache < n; ++i) {
1404 : const int found = cache_color_found[i];
1405 : aom_write_bit(w, found);
1406 : n_in_cache += found;
1407 : }
1408 : assert(n_in_cache + n_out_cache == n);
1409 : delta_encode_palette_colors(out_cache_colors, n_out_cache, bit_depth, 1, w);
1410 : }
1411 :
// Write chroma palette color values. The U channel is handled similarly to
// the luma channel. For the V channel, either use delta encoding or transmit
// raw values directly, whichever costs less.
1415 : static void write_palette_colors_uv(const MACROBLOCKD *const xd,
1416 : const PALETTE_MODE_INFO *const pmi,
1417 : int bit_depth, aom_writer *w) {
1418 : const int n = pmi->palette_size[1];
1419 : const uint16_t *colors_u = pmi->palette_colors + PALETTE_MAX_SIZE;
1420 : const uint16_t *colors_v = pmi->palette_colors + 2 * PALETTE_MAX_SIZE;
1421 : // U channel colors.
1422 : const MODE_INFO *const above_mi = xd->above_mi;
1423 : const MODE_INFO *const left_mi = xd->left_mi;
1424 : uint16_t color_cache[2 * PALETTE_MAX_SIZE];
1425 : const int n_cache = av1_get_palette_cache(above_mi, left_mi, 1, color_cache);
1426 : int out_cache_colors[PALETTE_MAX_SIZE];
1427 : uint8_t cache_color_found[2 * PALETTE_MAX_SIZE];
1428 : const int n_out_cache = av1_index_color_cache(
1429 : color_cache, n_cache, colors_u, n, cache_color_found, out_cache_colors);
1430 : int n_in_cache = 0;
1431 : for (int i = 0; i < n_cache && n_in_cache < n; ++i) {
1432 : const int found = cache_color_found[i];
1433 : aom_write_bit(w, found);
1434 : n_in_cache += found;
1435 : }
1436 : delta_encode_palette_colors(out_cache_colors, n_out_cache, bit_depth, 0, w);
1437 :
1438 : // V channel colors. Don't use color cache as the colors are not sorted.
1439 : const int max_val = 1 << bit_depth;
1440 : int zero_count = 0, min_bits_v = 0;
1441 : int bits_v =
1442 : av1_get_palette_delta_bits_v(pmi, bit_depth, &zero_count, &min_bits_v);
1443 : const int rate_using_delta =
1444 : 2 + bit_depth + (bits_v + 1) * (n - 1) - zero_count;
1445 : const int rate_using_raw = bit_depth * n;
1446 : if (rate_using_delta < rate_using_raw) { // delta encoding
1447 : assert(colors_v[0] < (1 << bit_depth));
1448 : aom_write_bit(w, 1);
1449 : aom_write_literal(w, bits_v - min_bits_v, 2);
1450 : aom_write_literal(w, colors_v[0], bit_depth);
1451 : for (int i = 1; i < n; ++i) {
1452 : assert(colors_v[i] < (1 << bit_depth));
1453 : if (colors_v[i] == colors_v[i - 1]) { // No need to signal sign bit.
1454 : aom_write_literal(w, 0, bits_v);
1455 : continue;
1456 : }
1457 : const int delta = abs((int)colors_v[i] - colors_v[i - 1]);
1458 : const int sign_bit = colors_v[i] < colors_v[i - 1];
1459 : if (delta <= max_val - delta) {
1460 : aom_write_literal(w, delta, bits_v);
1461 : aom_write_bit(w, sign_bit);
1462 : } else {
1463 : aom_write_literal(w, max_val - delta, bits_v);
1464 : aom_write_bit(w, !sign_bit);
1465 : }
1466 : }
1467 : } else { // Transmit raw values.
1468 : aom_write_bit(w, 0);
1469 : for (int i = 0; i < n; ++i) {
1470 : assert(colors_v[i] < (1 << bit_depth));
1471 : aom_write_literal(w, colors_v[i], bit_depth);
1472 : }
1473 : }
1474 : }
1475 : #endif // CONFIG_PALETTE_DELTA_ENCODING
1476 :
1477 0 : static void write_palette_mode_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
1478 : const MODE_INFO *const mi, aom_writer *w) {
1479 0 : const MB_MODE_INFO *const mbmi = &mi->mbmi;
1480 0 : const MODE_INFO *const above_mi = xd->above_mi;
1481 0 : const MODE_INFO *const left_mi = xd->left_mi;
1482 0 : const BLOCK_SIZE bsize = mbmi->sb_type;
1483 0 : const PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
1484 :
1485 0 : if (mbmi->mode == DC_PRED) {
1486 0 : const int n = pmi->palette_size[0];
1487 0 : int palette_y_mode_ctx = 0;
1488 0 : if (above_mi)
1489 0 : palette_y_mode_ctx +=
1490 0 : (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
1491 0 : if (left_mi)
1492 0 : palette_y_mode_ctx +=
1493 0 : (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
1494 0 : aom_write(
1495 : w, n > 0,
1496 0 : av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_y_mode_ctx]);
1497 0 : if (n > 0) {
1498 0 : av1_write_token(w, av1_palette_size_tree,
1499 0 : av1_default_palette_y_size_prob[bsize - BLOCK_8X8],
1500 0 : &palette_size_encodings[n - PALETTE_MIN_SIZE]);
1501 : #if CONFIG_PALETTE_DELTA_ENCODING
1502 : write_palette_colors_y(xd, pmi, cm->bit_depth, w);
1503 : #else
1504 0 : for (int i = 0; i < n; ++i) {
1505 0 : assert(pmi->palette_colors[i] < (1 << cm->bit_depth));
1506 0 : aom_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
1507 : }
1508 : #endif // CONFIG_PALETTE_DELTA_ENCODING
1509 0 : write_uniform(w, n, pmi->palette_first_color_idx[0]);
1510 : }
1511 : }
1512 :
1513 0 : if (mbmi->uv_mode == DC_PRED) {
1514 0 : const int n = pmi->palette_size[1];
1515 0 : const int palette_uv_mode_ctx = (pmi->palette_size[0] > 0);
1516 0 : aom_write(w, n > 0, av1_default_palette_uv_mode_prob[palette_uv_mode_ctx]);
1517 0 : if (n > 0) {
1518 0 : av1_write_token(w, av1_palette_size_tree,
1519 0 : av1_default_palette_uv_size_prob[bsize - BLOCK_8X8],
1520 0 : &palette_size_encodings[n - PALETTE_MIN_SIZE]);
1521 : #if CONFIG_PALETTE_DELTA_ENCODING
1522 : write_palette_colors_uv(xd, pmi, cm->bit_depth, w);
1523 : #else
1524 0 : for (int i = 0; i < n; ++i) {
1525 0 : assert(pmi->palette_colors[PALETTE_MAX_SIZE + i] <
1526 : (1 << cm->bit_depth));
1527 0 : assert(pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] <
1528 : (1 << cm->bit_depth));
1529 0 : aom_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
1530 0 : cm->bit_depth);
1531 0 : aom_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
1532 0 : cm->bit_depth);
1533 : }
1534 : #endif // CONFIG_PALETTE_DELTA_ENCODING
1535 0 : write_uniform(w, n, pmi->palette_first_color_idx[1]);
1536 : }
1537 : }
1538 0 : }
1539 : #endif // CONFIG_PALETTE
1540 :
1541 0 : void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd,
1542 : #if CONFIG_SUPERTX
1543 : const int supertx_enabled,
1544 : #endif
1545 : #if CONFIG_TXK_SEL
1546 : int block, int plane,
1547 : #endif
1548 : aom_writer *w) {
1549 0 : MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
1550 0 : const int is_inter = is_inter_block(mbmi);
1551 : #if CONFIG_VAR_TX
1552 0 : const TX_SIZE tx_size = is_inter ? mbmi->min_tx_size : mbmi->tx_size;
1553 : #else
1554 : const TX_SIZE tx_size = mbmi->tx_size;
1555 : #endif // CONFIG_VAR_TX
1556 : #if CONFIG_EC_ADAPT
1557 0 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
1558 : #else
1559 : FRAME_CONTEXT *ec_ctx = cm->fc;
1560 : #endif
1561 :
1562 : #if !CONFIG_TXK_SEL
1563 0 : TX_TYPE tx_type = mbmi->tx_type;
1564 : #else
1565 : // Only the Y plane's tx_type is transmitted.
1566 : if (plane > 0) return;
1567 : PLANE_TYPE plane_type = get_plane_type(plane);
1568 : TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
1569 : #endif
1570 :
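     : // The transform type is signaled only when more than one type is
     : // available for this block, the effective qindex is nonzero, and the
     : // block is not (segment-)skipped; otherwise nothing is written and
     : // the type is inferred.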
1571 : if (!FIXED_TX_TYPE) {
1572 : #if CONFIG_EXT_TX
1573 0 : const TX_SIZE square_tx_size = txsize_sqr_map[tx_size];
1574 0 : const BLOCK_SIZE bsize = mbmi->sb_type;
1575 0 : if (get_ext_tx_types(tx_size, bsize, is_inter, cm->reduced_tx_set_used) >
1576 0 : 1 &&
1577 0 : ((!cm->seg.enabled && cm->base_qindex > 0) ||
1578 0 : (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) &&
1579 0 : !mbmi->skip &&
1580 : #if CONFIG_SUPERTX
1581 : !supertx_enabled &&
1582 : #endif // CONFIG_SUPERTX
1583 0 : !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
1584 0 : const int eset =
1585 0 : get_ext_tx_set(tx_size, bsize, is_inter, cm->reduced_tx_set_used);
1586 0 : if (is_inter) {
1587 0 : assert(ext_tx_used_inter[eset][tx_type]);
1588 0 : if (eset > 0) {
1589 0 : aom_write_symbol(w, av1_ext_tx_inter_ind[eset][tx_type],
1590 0 : ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
1591 : ext_tx_cnt_inter[eset]);
1592 : }
1593 : } else if (ALLOW_INTRA_EXT_TX) {
1594 0 : assert(ext_tx_used_intra[eset][tx_type]);
1595 0 : if (eset > 0) {
1596 0 : aom_write_symbol(
1597 : w, av1_ext_tx_intra_ind[eset][tx_type],
1598 0 : ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
1599 : ext_tx_cnt_intra[eset]);
1600 : }
1601 : }
1602 : }
1603 : #else
1604 : if (tx_size < TX_32X32 &&
1605 : ((!cm->seg.enabled && cm->base_qindex > 0) ||
1606 : (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) &&
1607 : !mbmi->skip &&
1608 : #if CONFIG_SUPERTX
1609 : !supertx_enabled &&
1610 : #endif // CONFIG_SUPERTX
1611 : !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
1612 : if (is_inter) {
1613 : aom_write_symbol(w, av1_ext_tx_ind[tx_type],
1614 : ec_ctx->inter_ext_tx_cdf[tx_size], TX_TYPES);
1615 : } else {
1616 : aom_write_symbol(
1617 : w, av1_ext_tx_ind[tx_type],
1618 : ec_ctx->intra_ext_tx_cdf[tx_size]
1619 : [intra_mode_to_tx_type_context[mbmi->mode]],
1620 : TX_TYPES);
1621 : }
1622 : }
1623 : #endif // CONFIG_EXT_TX
1624 : }
1625 0 : }
1626 :
1627 0 : static void write_intra_mode(FRAME_CONTEXT *frame_ctx, BLOCK_SIZE bsize,
1628 : PREDICTION_MODE mode, aom_writer *w) {
1629 0 : aom_write_symbol(w, av1_intra_mode_ind[mode],
1630 0 : frame_ctx->y_mode_cdf[size_group_lookup[bsize]],
1631 : INTRA_MODES);
1632 0 : }
1633 :
1634 0 : static void write_intra_uv_mode(FRAME_CONTEXT *frame_ctx,
1635 : PREDICTION_MODE uv_mode, PREDICTION_MODE y_mode,
1636 : aom_writer *w) {
1637 0 : aom_write_symbol(w, av1_intra_mode_ind[uv_mode],
1638 0 : frame_ctx->uv_mode_cdf[y_mode], INTRA_MODES);
1639 0 : }
1640 :
1641 : #if CONFIG_CFL
1642 : static void write_cfl_alphas(FRAME_CONTEXT *const frame_ctx, int skip, int ind,
1643 : const CFL_SIGN_TYPE signs[CFL_SIGNS],
1644 : aom_writer *w) {
1645 : if (skip) {
1646 : assert(ind == 0);
1647 : assert(signs[CFL_PRED_U] == CFL_SIGN_POS);
1648 : assert(signs[CFL_PRED_V] == CFL_SIGN_POS);
1649 : } else {
1650 : // Check for uninitialized signs
1651 : if (cfl_alpha_codes[ind][CFL_PRED_U] == 0)
1652 : assert(signs[CFL_PRED_U] == CFL_SIGN_POS);
1653 : if (cfl_alpha_codes[ind][CFL_PRED_V] == 0)
1654 : assert(signs[CFL_PRED_V] == CFL_SIGN_POS);
1655 :
1656 : // Write a symbol representing a combination of alpha Cb and alpha Cr.
1657 : aom_write_symbol(w, ind, frame_ctx->cfl_alpha_cdf, CFL_ALPHABET_SIZE);
1658 :
1659 : // Signs are only signaled for nonzero codes.
1660 : if (cfl_alpha_codes[ind][CFL_PRED_U] != 0)
1661 : aom_write_bit(w, signs[CFL_PRED_U]);
1662 : if (cfl_alpha_codes[ind][CFL_PRED_V] != 0)
1663 : aom_write_bit(w, signs[CFL_PRED_V]);
1664 : }
1665 : }
1666 : #endif
1667 :
1668 0 : static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row,
1669 : const int mi_col,
1670 : #if CONFIG_SUPERTX
1671 : int supertx_enabled,
1672 : #endif
1673 : aom_writer *w) {
1674 0 : AV1_COMMON *const cm = &cpi->common;
1675 : #if CONFIG_DELTA_Q || CONFIG_EC_ADAPT
1676 0 : MACROBLOCK *const x = &cpi->td.mb;
1677 0 : MACROBLOCKD *const xd = &x->e_mbd;
1678 : #else
1679 : const MACROBLOCK *x = &cpi->td.mb;
1680 : const MACROBLOCKD *xd = &x->e_mbd;
1681 : #endif
1682 : #if CONFIG_EC_ADAPT
1683 0 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
1684 : #else
1685 : FRAME_CONTEXT *ec_ctx = cm->fc;
1686 : #endif
1687 0 : const MODE_INFO *mi = xd->mi[0];
1688 :
1689 0 : const struct segmentation *const seg = &cm->seg;
1690 0 : struct segmentation_probs *const segp = &cm->fc->seg;
1691 0 : const MB_MODE_INFO *const mbmi = &mi->mbmi;
1692 0 : const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1693 0 : const PREDICTION_MODE mode = mbmi->mode;
1694 0 : const int segment_id = mbmi->segment_id;
1695 0 : const BLOCK_SIZE bsize = mbmi->sb_type;
1696 0 : const int allow_hp = cm->allow_high_precision_mv;
1697 0 : const int is_inter = is_inter_block(mbmi);
1698 0 : const int is_compound = has_second_ref(mbmi);
1699 : int skip, ref;
1700 : #if CONFIG_CB4X4
1701 0 : const int unify_bsize = 1;
1702 : #else
1703 : const int unify_bsize = 0;
1704 : #endif
1705 : (void)mi_row;
1706 : (void)mi_col;
1707 :
1708 0 : if (seg->update_map) {
1709 0 : if (seg->temporal_update) {
1710 0 : const int pred_flag = mbmi->seg_id_predicted;
1711 0 : aom_prob pred_prob = av1_get_pred_prob_seg_id(segp, xd);
1712 0 : aom_write(w, pred_flag, pred_prob);
1713 0 : if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
1714 : } else {
1715 0 : write_segment_id(w, seg, segp, segment_id);
1716 : }
1717 : }
1718 :
1719 : #if CONFIG_SUPERTX
1720 : if (supertx_enabled)
1721 : skip = mbmi->skip;
1722 : else
1723 : skip = write_skip(cm, xd, segment_id, mi, w);
1724 : #else
1725 0 : skip = write_skip(cm, xd, segment_id, mi, w);
1726 : #endif // CONFIG_SUPERTX
1727 : #if CONFIG_DELTA_Q
1728 0 : if (cm->delta_q_present_flag) {
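     : // Delta-q (and, with CONFIG_EXT_DELTA_Q, delta loop-filter) is
     : // signaled at most once per superblock, at its top-left mi unit, as
     : // (current - previous) / delta_q_res; a skipped block spanning the
     : // whole superblock signals nothing.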
1729 0 : int super_block_upper_left =
1730 0 : ((mi_row & MAX_MIB_MASK) == 0) && ((mi_col & MAX_MIB_MASK) == 0);
1731 0 : if ((bsize != BLOCK_LARGEST || skip == 0) && super_block_upper_left) {
1732 0 : assert(mbmi->current_q_index > 0);
1733 0 : int reduced_delta_qindex =
1734 0 : (mbmi->current_q_index - xd->prev_qindex) / cm->delta_q_res;
1735 0 : write_delta_qindex(cm, xd, reduced_delta_qindex, w);
1736 0 : xd->prev_qindex = mbmi->current_q_index;
1737 : #if CONFIG_EXT_DELTA_Q
1738 0 : if (cm->delta_lf_present_flag) {
1739 0 : int reduced_delta_lflevel =
1740 0 : (mbmi->current_delta_lf_from_base - xd->prev_delta_lf_from_base) /
1741 0 : cm->delta_lf_res;
1742 0 : write_delta_lflevel(cm, xd, reduced_delta_lflevel, w);
1743 0 : xd->prev_delta_lf_from_base = mbmi->current_delta_lf_from_base;
1744 : }
1745 : #endif // CONFIG_EXT_DELTA_Q
1746 : }
1747 : }
1748 : #endif
1749 :
1750 : #if CONFIG_SUPERTX
1751 : if (!supertx_enabled)
1752 : #endif // CONFIG_SUPERTX
1753 0 : if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
1754 0 : aom_write(w, is_inter, av1_get_intra_inter_prob(cm, xd));
1755 :
1756 0 : if (cm->tx_mode == TX_MODE_SELECT &&
1757 : #if CONFIG_CB4X4 && (CONFIG_VAR_TX || CONFIG_RECT_TX)
1758 : #if CONFIG_RECT_TX
1759 0 : bsize > BLOCK_4X4 &&
1760 : #else
1761 : (bsize >= BLOCK_8X8 || (bsize > BLOCK_4X4 && is_inter)) &&
1762 : #endif // CONFIG_RECT_TX
1763 : #else
1764 : bsize >= BLOCK_8X8 &&
1765 : #endif
1766 : #if CONFIG_SUPERTX
1767 : !supertx_enabled &&
1768 : #endif // CONFIG_SUPERTX
1769 0 : !(is_inter && skip) && !xd->lossless[segment_id]) {
1770 : #if CONFIG_VAR_TX
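     : // With variable transform sizes, inter blocks signal a recursive
     : // transform split tree starting from the largest transform that
     : // fits; intra blocks signal a single selected size.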
1771 0 : if (is_inter) { // This implies skip flag is 0.
1772 0 : const TX_SIZE max_tx_size = get_vartx_max_txsize(mbmi, bsize);
1773 0 : const int bh = tx_size_high_unit[max_tx_size];
1774 0 : const int bw = tx_size_wide_unit[max_tx_size];
1775 0 : const int width = block_size_wide[bsize] >> tx_size_wide_log2[0];
1776 0 : const int height = block_size_high[bsize] >> tx_size_wide_log2[0];
1777 : int idx, idy;
1778 0 : for (idy = 0; idy < height; idy += bh)
1779 0 : for (idx = 0; idx < width; idx += bw)
1780 0 : write_tx_size_vartx(cm, xd, mbmi, max_tx_size, height != width, idy,
1781 : idx, w);
1782 : } else {
1783 0 : set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, skip, xd);
1784 0 : write_selected_tx_size(cm, xd, w);
1785 : }
1786 : } else {
1787 0 : set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, skip, xd);
1788 : #else
1789 : write_selected_tx_size(cm, xd, w);
1790 : #endif
1791 : }
1792 :
1793 0 : if (!is_inter) {
1794 0 : if (bsize >= BLOCK_8X8 || unify_bsize) {
1795 0 : write_intra_mode(ec_ctx, bsize, mode, w);
1796 : } else {
1797 : int idx, idy;
1798 0 : const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
1799 0 : const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
1800 0 : for (idy = 0; idy < 2; idy += num_4x4_h) {
1801 0 : for (idx = 0; idx < 2; idx += num_4x4_w) {
1802 0 : const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
1803 0 : write_intra_mode(ec_ctx, bsize, b_mode, w);
1804 : }
1805 : }
1806 : }
1807 : #if CONFIG_CB4X4
1808 0 : if (is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
1809 : xd->plane[1].subsampling_y)) {
1810 0 : write_intra_uv_mode(ec_ctx, mbmi->uv_mode, mode, w);
1811 : #else // !CONFIG_CB4X4
1812 : write_intra_uv_mode(ec_ctx, mbmi->uv_mode, mode, w);
1813 : #endif // CONFIG_CB4X4
1814 :
1815 : #if CONFIG_CFL
1816 : if (mbmi->uv_mode == DC_PRED) {
1817 : write_cfl_alphas(ec_ctx, mbmi->skip, mbmi->cfl_alpha_idx,
1818 : mbmi->cfl_alpha_signs, w);
1819 : }
1820 : #endif
1821 :
1822 : #if CONFIG_CB4X4
1823 : }
1824 : #endif
1825 :
1826 : #if CONFIG_EXT_INTRA
1827 0 : write_intra_angle_info(xd, ec_ctx, w);
1828 : #endif // CONFIG_EXT_INTRA
1829 : #if CONFIG_PALETTE
1830 0 : if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools)
1831 0 : write_palette_mode_info(cm, xd, mi, w);
1832 : #endif // CONFIG_PALETTE
1833 : #if CONFIG_FILTER_INTRA
1834 : if (bsize >= BLOCK_8X8 || unify_bsize)
1835 : write_filter_intra_mode_info(cm, xd, mbmi, mi_row, mi_col, w);
1836 : #endif // CONFIG_FILTER_INTRA
1837 : } else {
1838 : int16_t mode_ctx;
1839 0 : write_ref_frames(cm, xd, w);
1840 :
1841 : #if CONFIG_EXT_INTER
1842 0 : if (is_compound)
1843 0 : mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
1844 : else
1845 : #endif // CONFIG_EXT_INTER
1846 0 : mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
1847 0 : mbmi->ref_frame, bsize, -1);
1848 :
1849 : // If segment skip is not enabled code the mode.
1850 0 : if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
1851 0 : if (bsize >= BLOCK_8X8 || unify_bsize) {
1852 : #if CONFIG_EXT_INTER
1853 0 : if (is_inter_compound_mode(mode))
1854 0 : write_inter_compound_mode(cm, w, mode, mode_ctx);
1855 0 : else if (is_inter_singleref_mode(mode))
1856 : #endif // CONFIG_EXT_INTER
1857 0 : write_inter_mode(w, mode, ec_ctx, mode_ctx);
1858 :
1859 : #if CONFIG_EXT_INTER
1860 0 : if (mode == NEWMV || mode == NEW_NEWMV ||
1861 0 : have_nearmv_in_inter_mode(mode))
1862 : #else
1863 : if (mode == NEARMV || mode == NEWMV)
1864 : #endif
1865 0 : write_drl_idx(cm, mbmi, mbmi_ext, w);
1866 : else
1867 0 : assert(mbmi->ref_mv_idx == 0);
1868 : }
1869 : }
1870 :
1871 : #if !CONFIG_DUAL_FILTER && !CONFIG_WARPED_MOTION && !CONFIG_GLOBAL_MOTION
1872 : write_mb_interp_filter(cpi, xd, w);
1873 : #endif  // !CONFIG_DUAL_FILTER && !CONFIG_WARPED_MOTION && !CONFIG_GLOBAL_MOTION
1874 :
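     : // Pre-CB4X4 sub-8x8 path: each 4x4-level sub-block carries its own
     : // prediction mode and, for NEWMV-class modes, its own motion
     : // vector(s).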
1875 0 : if (bsize < BLOCK_8X8 && !unify_bsize) {
1876 0 : const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
1877 0 : const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
1878 : int idx, idy;
1879 0 : for (idy = 0; idy < 2; idy += num_4x4_h) {
1880 0 : for (idx = 0; idx < 2; idx += num_4x4_w) {
1881 0 : const int j = idy * 2 + idx;
1882 0 : const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
1883 : #if CONFIG_EXT_INTER
1884 0 : if (!is_compound)
1885 : #endif // CONFIG_EXT_INTER
1886 0 : mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
1887 0 : mbmi->ref_frame, bsize, j);
1888 : #if CONFIG_EXT_INTER
1889 0 : if (is_inter_compound_mode(b_mode))
1890 0 : write_inter_compound_mode(cm, w, b_mode, mode_ctx);
1891 0 : else if (is_inter_singleref_mode(b_mode))
1892 : #endif // CONFIG_EXT_INTER
1893 0 : write_inter_mode(w, b_mode, ec_ctx, mode_ctx);
1894 :
1895 : #if CONFIG_EXT_INTER
1896 0 : if (b_mode == NEWMV || b_mode == NEW_NEWMV) {
1897 : #else
1898 : if (b_mode == NEWMV) {
1899 : #endif // CONFIG_EXT_INTER
1900 0 : for (ref = 0; ref < 1 + is_compound; ++ref) {
1901 0 : int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1902 0 : int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1903 0 : mbmi_ext->ref_mv_stack[rf_type], ref,
1904 0 : mbmi->ref_mv_idx);
1905 0 : nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1906 0 : av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
1907 : #if CONFIG_EXT_INTER
1908 : &mi->bmi[j].ref_mv[ref].as_mv,
1909 : #else
1910 : &mi->bmi[j].pred_mv[ref].as_mv,
1911 : #endif // CONFIG_EXT_INTER
1912 : nmvc, allow_hp);
1913 : }
1914 : }
1915 : #if CONFIG_EXT_INTER
1916 0 : else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
1917 0 : int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1918 0 : int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1919 0 : mbmi_ext->ref_mv_stack[rf_type], 1,
1920 0 : mbmi->ref_mv_idx);
1921 0 : nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1922 0 : av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
1923 : &mi->bmi[j].ref_mv[1].as_mv, nmvc, allow_hp);
1924 0 : } else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
1925 0 : int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1926 0 : int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1927 0 : mbmi_ext->ref_mv_stack[rf_type], 0,
1928 0 : mbmi->ref_mv_idx);
1929 0 : nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1930 0 : av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
1931 : &mi->bmi[j].ref_mv[0].as_mv, nmvc, allow_hp);
1932 : }
1933 : #endif // CONFIG_EXT_INTER
1934 : }
1935 : }
1936 : } else {
1937 : #if CONFIG_EXT_INTER
1938 0 : if (mode == NEWMV || mode == NEW_NEWMV) {
1939 : #else
1940 : if (mode == NEWMV) {
1941 : #endif // CONFIG_EXT_INTER
1942 : int_mv ref_mv;
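     : // Each new motion vector is coded as a difference from its reference
     : // MV, with the nmv context chosen from the reference MV stack and
     : // ref_mv_idx.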
1943 0 : for (ref = 0; ref < 1 + is_compound; ++ref) {
1944 0 : int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1945 0 : int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1946 0 : mbmi_ext->ref_mv_stack[rf_type], ref,
1947 0 : mbmi->ref_mv_idx);
1948 0 : nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1949 0 : ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
1950 0 : av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv, nmvc,
1951 : allow_hp);
1952 : }
1953 : #if CONFIG_EXT_INTER
1954 0 : } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
1955 0 : int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1956 0 : int nmv_ctx =
1957 0 : av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1958 0 : mbmi_ext->ref_mv_stack[rf_type], 1, mbmi->ref_mv_idx);
1959 0 : nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1960 0 : av1_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
1961 0 : &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv, nmvc,
1962 : allow_hp);
1963 0 : } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
1964 0 : int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1965 0 : int nmv_ctx =
1966 0 : av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1967 0 : mbmi_ext->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
1968 0 : nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1969 0 : av1_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
1970 0 : &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv, nmvc,
1971 : allow_hp);
1972 : #endif // CONFIG_EXT_INTER
1973 : }
1974 : }
1975 :
1976 : #if CONFIG_EXT_INTER && CONFIG_INTERINTRA
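     : // Inter-intra prediction: for eligible single-reference blocks, a
     : // flag says whether the second predictor is intra; if so, the
     : // interintra mode follows and, when wedge blending is allowed at
     : // this block size, the wedge index (the wedge sign is implicitly
     : // zero here).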
1977 0 : if (cpi->common.reference_mode != COMPOUND_REFERENCE &&
1978 : #if CONFIG_SUPERTX
1979 : !supertx_enabled &&
1980 : #endif // CONFIG_SUPERTX
1981 0 : cpi->common.allow_interintra_compound && is_interintra_allowed(mbmi)) {
1982 0 : const int interintra = mbmi->ref_frame[1] == INTRA_FRAME;
1983 0 : const int bsize_group = size_group_lookup[bsize];
1984 0 : aom_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
1985 0 : if (interintra) {
1986 0 : write_interintra_mode(w, mbmi->interintra_mode,
1987 0 : cm->fc->interintra_mode_prob[bsize_group]);
1988 0 : if (is_interintra_wedge_used(bsize)) {
1989 0 : aom_write(w, mbmi->use_wedge_interintra,
1990 0 : cm->fc->wedge_interintra_prob[bsize]);
1991 0 : if (mbmi->use_wedge_interintra) {
1992 0 : aom_write_literal(w, mbmi->interintra_wedge_index,
1993 : get_wedge_bits_lookup(bsize));
1994 0 : assert(mbmi->interintra_wedge_sign == 0);
1995 : }
1996 : }
1997 : }
1998 : }
1999 : #endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
2000 :
2001 : #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
2002 : #if CONFIG_SUPERTX
2003 : if (!supertx_enabled)
2004 : #endif // CONFIG_SUPERTX
2005 : #if CONFIG_EXT_INTER
2006 0 : if (mbmi->ref_frame[1] != INTRA_FRAME)
2007 : #endif // CONFIG_EXT_INTER
2008 0 : write_motion_mode(cm, mi, w);
2009 : #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
2010 :
2011 : #if CONFIG_EXT_INTER
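     : // Masked compound prediction: a token selects the compound type
     : // (average, wedge, or segment mask), followed by its parameters:
     : // the wedge index and sign, or the segment mask type.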
2012 0 : if (cpi->common.reference_mode != SINGLE_REFERENCE &&
2013 0 : is_inter_compound_mode(mbmi->mode)
2014 : #if CONFIG_MOTION_VAR
2015 0 : && mbmi->motion_mode == SIMPLE_TRANSLATION
2016 : #endif // CONFIG_MOTION_VAR
2017 0 : && is_any_masked_compound_used(bsize)) {
2018 : #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
2019 0 : if (cm->allow_masked_compound) {
2020 0 : av1_write_token(
2021 0 : w, av1_compound_type_tree, cm->fc->compound_type_prob[bsize],
2022 0 : &compound_type_encodings[mbmi->interinter_compound_type]);
2023 : #if CONFIG_WEDGE
2024 0 : if (mbmi->interinter_compound_type == COMPOUND_WEDGE) {
2025 0 : aom_write_literal(w, mbmi->wedge_index, get_wedge_bits_lookup(bsize));
2026 0 : aom_write_bit(w, mbmi->wedge_sign);
2027 : }
2028 : #endif // CONFIG_WEDGE
2029 : #if CONFIG_COMPOUND_SEGMENT
2030 0 : if (mbmi->interinter_compound_type == COMPOUND_SEG) {
2031 0 : aom_write_literal(w, mbmi->mask_type, MAX_SEG_MASK_BITS);
2032 : }
2033 : #endif // CONFIG_COMPOUND_SEGMENT
2034 : }
2035 : #endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
2036 : }
2037 : #endif // CONFIG_EXT_INTER
2038 :
2039 : #if CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
2040 0 : write_mb_interp_filter(cpi, xd, w);
2041 : #endif  // CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
2042 : }
2043 :
2044 : #if !CONFIG_TXK_SEL
2045 0 : av1_write_tx_type(cm, xd,
2046 : #if CONFIG_SUPERTX
2047 : supertx_enabled,
2048 : #endif
2049 : w);
2050 : #endif // !CONFIG_TXK_SEL
2051 0 : }
2052 :
2053 0 : static void write_mb_modes_kf(AV1_COMMON *cm,
2054 : #if CONFIG_DELTA_Q
2055 : MACROBLOCKD *xd,
2056 : #else
2057 : const MACROBLOCKD *xd,
2058 : #endif // CONFIG_DELTA_Q
2059 : #if CONFIG_INTRABC
2060 : const MB_MODE_INFO_EXT *mbmi_ext,
2061 : #endif // CONFIG_INTRABC
2062 : const int mi_row, const int mi_col,
2063 : aom_writer *w) {
2064 0 : const struct segmentation *const seg = &cm->seg;
2065 0 : struct segmentation_probs *const segp = &cm->fc->seg;
2066 0 : const MODE_INFO *const mi = xd->mi[0];
2067 0 : const MODE_INFO *const above_mi = xd->above_mi;
2068 0 : const MODE_INFO *const left_mi = xd->left_mi;
2069 0 : const MB_MODE_INFO *const mbmi = &mi->mbmi;
2070 0 : const BLOCK_SIZE bsize = mbmi->sb_type;
2071 : #if CONFIG_CB4X4
2072 0 : const int unify_bsize = 1;
2073 : #else
2074 : const int unify_bsize = 0;
2075 : #endif
2076 : (void)mi_row;
2077 : (void)mi_col;
2078 :
2079 : #if CONFIG_EC_ADAPT
2080 0 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
2081 : #else
2082 : FRAME_CONTEXT *ec_ctx = cm->fc;
2083 : #endif
2084 :
2085 0 : if (seg->update_map) write_segment_id(w, seg, segp, mbmi->segment_id);
2086 :
2087 : #if CONFIG_DELTA_Q
2088 0 : const int skip = write_skip(cm, xd, mbmi->segment_id, mi, w);
2089 0 : if (cm->delta_q_present_flag) {
2090 0 : int super_block_upper_left =
2091 0 : ((mi_row & MAX_MIB_MASK) == 0) && ((mi_col & MAX_MIB_MASK) == 0);
2092 0 : if ((bsize != BLOCK_LARGEST || skip == 0) && super_block_upper_left) {
2093 0 : assert(mbmi->current_q_index > 0);
2094 0 : int reduced_delta_qindex =
2095 0 : (mbmi->current_q_index - xd->prev_qindex) / cm->delta_q_res;
2096 0 : write_delta_qindex(cm, xd, reduced_delta_qindex, w);
2097 0 : xd->prev_qindex = mbmi->current_q_index;
2098 : #if CONFIG_EXT_DELTA_Q
2099 0 : if (cm->delta_lf_present_flag) {
2100 0 : int reduced_delta_lflevel =
2101 0 : (mbmi->current_delta_lf_from_base - xd->prev_delta_lf_from_base) /
2102 0 : cm->delta_lf_res;
2103 0 : write_delta_lflevel(cm, xd, reduced_delta_lflevel, w);
2104 0 : xd->prev_delta_lf_from_base = mbmi->current_delta_lf_from_base;
2105 : }
2106 : #endif // CONFIG_EXT_DELTA_Q
2107 : }
2108 : }
2109 : #else
2110 : write_skip(cm, xd, mbmi->segment_id, mi, w);
2111 : #endif
2112 :
2113 0 : if (cm->tx_mode == TX_MODE_SELECT &&
2114 : #if CONFIG_CB4X4 && (CONFIG_VAR_TX || CONFIG_RECT_TX)
2115 : #if CONFIG_RECT_TX
2116 0 : bsize > BLOCK_4X4 &&
2117 : #else
2118 : bsize >= BLOCK_8X8 &&
2119 : #endif // CONFIG_RECT_TX
2120 : #else
2121 : bsize >= BLOCK_8X8 &&
2122 : #endif
2123 0 : !xd->lossless[mbmi->segment_id])
2124 0 : write_selected_tx_size(cm, xd, w);
2125 :
2126 : #if CONFIG_INTRABC
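     : // IntraBC: a screen-content block may copy a region of already coded
     : // pixels in the same frame; its displacement vector is coded like a
     : // motion vector, against the intra-frame reference MV, using the
     : // dedicated ndvc context.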
2127 : if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools) {
2128 : int use_intrabc = is_intrabc_block(mbmi);
2129 : aom_write(w, use_intrabc, ec_ctx->intrabc_prob);
2130 : if (use_intrabc) {
2131 : assert(mbmi->mode == DC_PRED);
2132 : assert(mbmi->uv_mode == DC_PRED);
2133 : int_mv dv_ref = mbmi_ext->ref_mvs[INTRA_FRAME][0];
2134 : av1_encode_dv(w, &mbmi->mv[0].as_mv, &dv_ref.as_mv, &ec_ctx->ndvc);
2135 : #if CONFIG_EXT_TX && !CONFIG_TXK_SEL
2136 : av1_write_tx_type(cm, xd,
2137 : #if CONFIG_SUPERTX
2138 : 0,
2139 : #endif
2140 : w);
2141 : #endif // CONFIG_EXT_TX && !CONFIG_TXK_SEL
2142 : return;
2143 : }
2144 : }
2145 : #endif // CONFIG_INTRABC
2146 :
2147 0 : if (bsize >= BLOCK_8X8 || unify_bsize) {
2148 0 : write_intra_mode_kf(cm, ec_ctx, mi, above_mi, left_mi, 0, mbmi->mode, w);
2149 : } else {
2150 0 : const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
2151 0 : const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
2152 : int idx, idy;
2153 :
2154 0 : for (idy = 0; idy < 2; idy += num_4x4_h) {
2155 0 : for (idx = 0; idx < 2; idx += num_4x4_w) {
2156 0 : const int block = idy * 2 + idx;
2157 0 : write_intra_mode_kf(cm, ec_ctx, mi, above_mi, left_mi, block,
2158 0 : mi->bmi[block].as_mode, w);
2159 : }
2160 : }
2161 : }
2162 :
2163 : #if CONFIG_CB4X4
2164 0 : if (is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
2165 : xd->plane[1].subsampling_y)) {
2166 0 : write_intra_uv_mode(ec_ctx, mbmi->uv_mode, mbmi->mode, w);
2167 : #else // !CONFIG_CB4X4
2168 : write_intra_uv_mode(ec_ctx, mbmi->uv_mode, mbmi->mode, w);
2169 : #endif // CONFIG_CB4X4
2170 :
2171 : #if CONFIG_CFL
2172 : if (mbmi->uv_mode == DC_PRED) {
2173 : write_cfl_alphas(ec_ctx, mbmi->skip, mbmi->cfl_alpha_idx,
2174 : mbmi->cfl_alpha_signs, w);
2175 : }
2176 : #endif
2177 :
2178 : #if CONFIG_CB4X4
2179 : }
2180 : #endif
2181 : #if CONFIG_EXT_INTRA
2182 0 : write_intra_angle_info(xd, ec_ctx, w);
2183 : #endif // CONFIG_EXT_INTRA
2184 : #if CONFIG_PALETTE
2185 0 : if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools)
2186 0 : write_palette_mode_info(cm, xd, mi, w);
2187 : #endif // CONFIG_PALETTE
2188 : #if CONFIG_FILTER_INTRA
2189 : if (bsize >= BLOCK_8X8 || unify_bsize)
2190 : write_filter_intra_mode_info(cm, xd, mbmi, mi_row, mi_col, w);
2191 : #endif // CONFIG_FILTER_INTRA
2192 :
2193 : #if !CONFIG_TXK_SEL
2194 0 : av1_write_tx_type(cm, xd,
2195 : #if CONFIG_SUPERTX
2196 : 0,
2197 : #endif
2198 : w);
2199 : #endif // !CONFIG_TXK_SEL
2200 0 : }
2201 :
2202 : #if CONFIG_SUPERTX
2203 : #define write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
2204 : mi_row, mi_col) \
2205 : write_modes_b(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col)
2206 : #else
2207 : #define write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
2208 : mi_row, mi_col) \
2209 : write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col)
2210 : #endif // CONFIG_SUPERTX
2211 :
2212 : #if CONFIG_RD_DEBUG
2213 : static void dump_mode_info(MODE_INFO *mi) {
2214 : printf("\nmi->mbmi.mi_row == %d\n", mi->mbmi.mi_row);
2215 : printf("&& mi->mbmi.mi_col == %d\n", mi->mbmi.mi_col);
2216 : printf("&& mi->mbmi.sb_type == %d\n", mi->mbmi.sb_type);
2217 : printf("&& mi->mbmi.tx_size == %d\n", mi->mbmi.tx_size);
2218 : if (mi->mbmi.sb_type >= BLOCK_8X8) {
2219 : printf("&& mi->mbmi.mode == %d\n", mi->mbmi.mode);
2220 : } else {
2221 : printf("&& mi->bmi[0].as_mode == %d\n", mi->bmi[0].as_mode);
2222 : }
2223 : }
2224 : static int rd_token_stats_mismatch(RD_STATS *rd_stats, TOKEN_STATS *token_stats,
2225 : int plane) {
2226 : if (rd_stats->txb_coeff_cost[plane] != token_stats->cost) {
2227 : #if CONFIG_VAR_TX
2228 : int r, c;
2229 : #endif
2230 : printf("\nplane %d rd_stats->txb_coeff_cost %d token_stats->cost %d\n",
2231 : plane, rd_stats->txb_coeff_cost[plane], token_stats->cost);
2232 : #if CONFIG_VAR_TX
2233 : printf("rd txb_coeff_cost_map\n");
2234 : for (r = 0; r < TXB_COEFF_COST_MAP_SIZE; ++r) {
2235 : for (c = 0; c < TXB_COEFF_COST_MAP_SIZE; ++c) {
2236 : printf("%d ", rd_stats->txb_coeff_cost_map[plane][r][c]);
2237 : }
2238 : printf("\n");
2239 : }
2240 :
2241 : printf("pack txb_coeff_cost_map\n");
2242 : for (r = 0; r < TXB_COEFF_COST_MAP_SIZE; ++r) {
2243 : for (c = 0; c < TXB_COEFF_COST_MAP_SIZE; ++c) {
2244 : printf("%d ", token_stats->txb_coeff_cost_map[r][c]);
2245 : }
2246 : printf("\n");
2247 : }
2248 : #endif
2249 : return 1;
2250 : }
2251 : return 0;
2252 : }
2253 : #endif  // CONFIG_RD_DEBUG
2254 :
2255 0 : static void write_mbmi_b(AV1_COMP *cpi, const TileInfo *const tile,
2256 : aom_writer *w,
2257 : #if CONFIG_SUPERTX
2258 : int supertx_enabled,
2259 : #endif
2260 : int mi_row, int mi_col) {
2261 0 : AV1_COMMON *const cm = &cpi->common;
2262 0 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
2263 : MODE_INFO *m;
2264 : int bh, bw;
2265 0 : xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
2266 0 : m = xd->mi[0];
2267 :
2268 0 : assert(m->mbmi.sb_type <= cm->sb_size);
2269 :
2270 0 : bh = mi_size_high[m->mbmi.sb_type];
2271 0 : bw = mi_size_wide[m->mbmi.sb_type];
2272 :
2273 0 : cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
2274 :
2275 0 : set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw,
2276 : #if CONFIG_DEPENDENT_HORZTILES
2277 : cm->dependent_horz_tiles,
2278 : #endif // CONFIG_DEPENDENT_HORZTILES
2279 : cm->mi_rows, cm->mi_cols);
2280 :
2281 0 : if (frame_is_intra_only(cm)) {
2282 0 : write_mb_modes_kf(cm, xd,
2283 : #if CONFIG_INTRABC
2284 : cpi->td.mb.mbmi_ext,
2285 : #endif // CONFIG_INTRABC
2286 : mi_row, mi_col, w);
2287 : } else {
2288 : #if CONFIG_VAR_TX
2289 0 : xd->above_txfm_context =
2290 0 : cm->above_txfm_context + (mi_col << TX_UNIT_WIDE_LOG2);
2291 0 : xd->left_txfm_context = xd->left_txfm_context_buffer +
2292 0 : ((mi_row & MAX_MIB_MASK) << TX_UNIT_HIGH_LOG2);
2293 : #endif
2294 : #if CONFIG_DUAL_FILTER
2295 : // has_subpel_mv_component needs the ref frame buffers set up to look
2296 : // up if they are scaled. has_subpel_mv_component is in turn needed by
2297 : // write_switchable_interp_filter, which is called by pack_inter_mode_mvs.
2298 0 : set_ref_ptrs(cm, xd, m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
2299 : #endif // CONFIG_DUAL_FILTER
2300 : #if 0
2301 : // NOTE(zoeliu): For debug
2302 : if (cm->current_video_frame == FRAME_TO_CHECK && cm->show_frame == 1) {
2303 : const PREDICTION_MODE mode = m->mbmi.mode;
2304 : const int segment_id = m->mbmi.segment_id;
2305 : const BLOCK_SIZE bsize = m->mbmi.sb_type;
2306 :
2307 : // For sub8x8, simply dump out the first sub8x8 block info
2308 : const PREDICTION_MODE b_mode =
2309 : (bsize < BLOCK_8X8) ? m->bmi[0].as_mode : -1;
2310 : const int mv_x = (bsize < BLOCK_8X8) ?
2311 : m->bmi[0].as_mv[0].as_mv.row : m->mbmi.mv[0].as_mv.row;
2312 : const int mv_y = (bsize < BLOCK_8X8) ?
2313 : m->bmi[0].as_mv[0].as_mv.col : m->mbmi.mv[0].as_mv.col;
2314 :
2315 : printf("Before pack_inter_mode_mvs(): "
2316 : "Frame=%d, (mi_row,mi_col)=(%d,%d), "
2317 : "mode=%d, segment_id=%d, bsize=%d, b_mode=%d, "
2318 : "mv[0]=(%d, %d), ref[0]=%d, ref[1]=%d\n",
2319 : cm->current_video_frame, mi_row, mi_col,
2320 : mode, segment_id, bsize, b_mode, mv_x, mv_y,
2321 : m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
2322 : }
2323 : #endif // 0
2324 0 : pack_inter_mode_mvs(cpi, mi_row, mi_col,
2325 : #if CONFIG_SUPERTX
2326 : supertx_enabled,
2327 : #endif
2328 : w);
2329 : }
2330 0 : }
2331 :
2332 0 : static void write_tokens_b(AV1_COMP *cpi, const TileInfo *const tile,
2333 : aom_writer *w, const TOKENEXTRA **tok,
2334 : const TOKENEXTRA *const tok_end, int mi_row,
2335 : int mi_col) {
2336 0 : AV1_COMMON *const cm = &cpi->common;
2337 0 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
2338 0 : MODE_INFO *const m = xd->mi[0];
2339 0 : MB_MODE_INFO *const mbmi = &m->mbmi;
2340 : int plane;
2341 : int bh, bw;
2342 : #if CONFIG_PVQ || CONFIG_LV_MAP
2343 : MACROBLOCK *const x = &cpi->td.mb;
2344 : (void)tok;
2345 : (void)tok_end;
2346 : #endif
2347 0 : xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
2348 :
2349 0 : assert(mbmi->sb_type <= cm->sb_size);
2350 :
2351 0 : bh = mi_size_high[mbmi->sb_type];
2352 0 : bw = mi_size_wide[mbmi->sb_type];
2353 0 : cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
2354 :
2355 0 : set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw,
2356 : #if CONFIG_DEPENDENT_HORZTILES
2357 : cm->dependent_horz_tiles,
2358 : #endif // CONFIG_DEPENDENT_HORZTILES
2359 : cm->mi_rows, cm->mi_cols);
2360 :
2361 : #if CONFIG_PALETTE
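     : // Palette color-index tokens: rows * cols - 1 indices are packed
     : // here; the first index was already signaled via write_uniform in
     : // write_palette_mode_info.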
2362 0 : for (plane = 0; plane <= 1; ++plane) {
2363 0 : const uint8_t palette_size_plane =
2364 : mbmi->palette_mode_info.palette_size[plane];
2365 0 : if (palette_size_plane > 0) {
2366 : #if CONFIG_INTRABC
2367 : assert(mbmi->use_intrabc == 0);
2368 : #endif
2369 : int rows, cols;
2370 0 : assert(mbmi->sb_type >= BLOCK_8X8);
2371 0 : av1_get_block_dimensions(mbmi->sb_type, plane, xd, NULL, NULL, &rows,
2372 : &cols);
2373 0 : assert(*tok < tok_end);
2374 0 : pack_palette_tokens(w, tok, palette_size_plane, rows * cols - 1);
2375 0 : assert(*tok < tok_end + mbmi->skip);
2376 : }
2377 : }
2378 : #endif // CONFIG_PALETTE
2379 :
2380 : #if CONFIG_COEF_INTERLEAVE
2381 : if (!mbmi->skip) {
2382 : const struct macroblockd_plane *const pd_y = &xd->plane[0];
2383 : const struct macroblockd_plane *const pd_c = &xd->plane[1];
2384 : const TX_SIZE tx_log2_y = mbmi->tx_size;
2385 : const TX_SIZE tx_log2_c = get_uv_tx_size(mbmi, pd_c);
2386 : const int tx_sz_y = (1 << tx_log2_y);
2387 : const int tx_sz_c = (1 << tx_log2_c);
2388 :
2389 : const BLOCK_SIZE plane_bsize_y =
2390 : get_plane_block_size(AOMMAX(mbmi->sb_type, 3), pd_y);
2391 : const BLOCK_SIZE plane_bsize_c =
2392 : get_plane_block_size(AOMMAX(mbmi->sb_type, 3), pd_c);
2393 :
2394 : const int num_4x4_w_y = num_4x4_blocks_wide_lookup[plane_bsize_y];
2395 : const int num_4x4_w_c = num_4x4_blocks_wide_lookup[plane_bsize_c];
2396 : const int num_4x4_h_y = num_4x4_blocks_high_lookup[plane_bsize_y];
2397 : const int num_4x4_h_c = num_4x4_blocks_high_lookup[plane_bsize_c];
2398 :
2399 : const int max_4x4_w_y = get_max_4x4_size(num_4x4_w_y, xd->mb_to_right_edge,
2400 : pd_y->subsampling_x);
2401 : const int max_4x4_h_y = get_max_4x4_size(num_4x4_h_y, xd->mb_to_bottom_edge,
2402 : pd_y->subsampling_y);
2403 : const int max_4x4_w_c = get_max_4x4_size(num_4x4_w_c, xd->mb_to_right_edge,
2404 : pd_c->subsampling_x);
2405 : const int max_4x4_h_c = get_max_4x4_size(num_4x4_h_c, xd->mb_to_bottom_edge,
2406 : pd_c->subsampling_y);
2407 :
2408 : // The max_4x4_w/h may be smaller than tx_sz in some corner cases,
2409 : // e.g. when the SB is split by tile boundaries.
2410 : const int tu_num_w_y = (max_4x4_w_y + tx_sz_y - 1) / tx_sz_y;
2411 : const int tu_num_h_y = (max_4x4_h_y + tx_sz_y - 1) / tx_sz_y;
2412 : const int tu_num_w_c = (max_4x4_w_c + tx_sz_c - 1) / tx_sz_c;
2413 : const int tu_num_h_c = (max_4x4_h_c + tx_sz_c - 1) / tx_sz_c;
2414 : const int tu_num_y = tu_num_w_y * tu_num_h_y;
2415 : const int tu_num_c = tu_num_w_c * tu_num_h_c;
2416 :
2417 : int tu_idx_y = 0, tu_idx_c = 0;
2418 : TOKEN_STATS token_stats;
2419 : init_token_stats(&token_stats);
2420 :
2421 : assert(*tok < tok_end);
2422 :
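     : // Interleave tokens: one luma TU per iteration, followed by a pair
     : // of chroma TUs while any remain; surplus chroma TUs (possible in
     : // 4:2:2) are flushed by the loop below.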
2423 : while (tu_idx_y < tu_num_y) {
2424 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_y, &token_stats);
2425 : assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
2426 : (*tok)++;
2427 : tu_idx_y++;
2428 :
2429 : if (tu_idx_c < tu_num_c) {
2430 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_c, &token_stats);
2431 : assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
2432 : (*tok)++;
2433 :
2434 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_c, &token_stats);
2435 : assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
2436 : (*tok)++;
2437 :
2438 : tu_idx_c++;
2439 : }
2440 : }
2441 :
2442 : // In the 4:2:2 case, it's possible that chroma has more TUs than luma.
2443 : while (tu_idx_c < tu_num_c) {
2444 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_c, &token_stats);
2445 : assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
2446 : (*tok)++;
2447 :
2448 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_c, &token_stats);
2449 : assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
2450 : (*tok)++;
2451 :
2452 : tu_idx_c++;
2453 : }
2454 : }
2455 : #else // CONFIG_COEF_INTERLEAVE
2456 0 : if (!mbmi->skip) {
2457 : #if !CONFIG_PVQ && !CONFIG_LV_MAP
2458 0 : assert(*tok < tok_end);
2459 : #endif
2460 0 : for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
2461 : #if CONFIG_CB4X4
2462 0 : if (!is_chroma_reference(mi_row, mi_col, mbmi->sb_type,
2463 : xd->plane[plane].subsampling_x,
2464 : xd->plane[plane].subsampling_y)) {
2465 0 : (*tok)++;
2466 0 : continue;
2467 : }
2468 : #endif
2469 : #if CONFIG_VAR_TX
2470 0 : const struct macroblockd_plane *const pd = &xd->plane[plane];
2471 0 : BLOCK_SIZE bsize = mbmi->sb_type;
2472 : #if CONFIG_CB4X4
2473 : #if CONFIG_CHROMA_2X2
2474 : const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
2475 : #else
2476 0 : const BLOCK_SIZE plane_bsize =
2477 0 : AOMMAX(BLOCK_4X4, get_plane_block_size(bsize, pd));
2478 : #endif
2479 : #else
2480 : const BLOCK_SIZE plane_bsize =
2481 : get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
2482 : #endif
2483 :
2484 0 : const int num_4x4_w =
2485 0 : block_size_wide[plane_bsize] >> tx_size_wide_log2[0];
2486 0 : const int num_4x4_h =
2487 0 : block_size_high[plane_bsize] >> tx_size_wide_log2[0];
2488 : int row, col;
2489 : TOKEN_STATS token_stats;
2490 0 : init_token_stats(&token_stats);
2491 :
2492 0 : if (is_inter_block(mbmi)) {
2493 0 : const TX_SIZE max_tx_size = get_vartx_max_txsize(mbmi, plane_bsize);
2494 0 : int block = 0;
2495 0 : const int step =
2496 0 : tx_size_wide_unit[max_tx_size] * tx_size_high_unit[max_tx_size];
2497 0 : const int bkw = tx_size_wide_unit[max_tx_size];
2498 0 : const int bkh = tx_size_high_unit[max_tx_size];
2499 0 : for (row = 0; row < num_4x4_h; row += bkh) {
2500 0 : for (col = 0; col < num_4x4_w; col += bkw) {
2501 0 : pack_txb_tokens(w,
2502 : #if CONFIG_LV_MAP
2503 : cm,
2504 : #endif
2505 : tok, tok_end,
2506 : #if CONFIG_PVQ || CONFIG_LV_MAP
2507 : x,
2508 : #endif
2509 : xd, mbmi, plane, plane_bsize, cm->bit_depth, block,
2510 : row, col, max_tx_size, &token_stats);
2511 0 : block += step;
2512 : }
2513 : }
2514 : #if CONFIG_RD_DEBUG
2515 : if (mbmi->sb_type >= BLOCK_8X8 &&
2516 : rd_token_stats_mismatch(&mbmi->rd_stats, &token_stats, plane)) {
2517 : dump_mode_info(m);
2518 : assert(0);
2519 : }
2520 : #endif // CONFIG_RD_DEBUG
2521 : } else {
2522 : #if CONFIG_LV_MAP
2523 : av1_write_coeffs_mb(cm, x, w, plane);
2524 : #else
2525 0 : TX_SIZE tx = get_tx_size(plane, xd);
2526 0 : const int bkw = tx_size_wide_unit[tx];
2527 0 : const int bkh = tx_size_high_unit[tx];
2528 0 : for (row = 0; row < num_4x4_h; row += bkh) {
2529 0 : for (col = 0; col < num_4x4_w; col += bkw) {
2530 : #if !CONFIG_PVQ
2531 0 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx, &token_stats);
2532 : #else
2533 : pack_pvq_tokens(w, x, xd, plane, bsize, tx);
2534 : #endif
2535 : }
2536 : }
2537 : #endif // CONFIG_LV_MAP
2538 : }
2539 : #else
2540 : TX_SIZE tx = get_tx_size(plane, xd);
2541 : TOKEN_STATS token_stats;
2542 : #if !CONFIG_PVQ
2543 : init_token_stats(&token_stats);
2544 : #if CONFIG_LV_MAP
2545 : (void)tx;
2546 : av1_write_coeffs_mb(cm, x, w, plane);
2547 : #else // CONFIG_LV_MAP
2548 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx, &token_stats);
2549 : #endif // CONFIG_LV_MAP
2550 :
2551 : #else
2552 : (void)token_stats;
2553 : pack_pvq_tokens(w, x, xd, plane, mbmi->sb_type, tx);
2554 : #endif
2555 : #if CONFIG_RD_DEBUG
2556 : if (is_inter_block(mbmi) && mbmi->sb_type >= BLOCK_8X8 &&
2557 : rd_token_stats_mismatch(&mbmi->rd_stats, &token_stats, plane)) {
2558 : dump_mode_info(m);
2559 : assert(0);
2560 : }
2561 : #endif // CONFIG_RD_DEBUG
2562 : #endif // CONFIG_VAR_TX
2563 :
2564 : #if !CONFIG_PVQ && !CONFIG_LV_MAP
2565 0 : assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
2566 0 : (*tok)++;
2567 : #endif
2568 : }
2569 : }
2570 : #endif // CONFIG_COEF_INTERLEAVE
2571 0 : }
2572 :
2573 : #if CONFIG_MOTION_VAR && CONFIG_NCOBMC
2574 : static void write_tokens_sb(AV1_COMP *cpi, const TileInfo *const tile,
2575 : aom_writer *w, const TOKENEXTRA **tok,
2576 : const TOKENEXTRA *const tok_end, int mi_row,
2577 : int mi_col, BLOCK_SIZE bsize) {
2578 : const AV1_COMMON *const cm = &cpi->common;
2579 : const int hbs = mi_size_wide[bsize] / 2;
2580 : PARTITION_TYPE partition;
2581 : BLOCK_SIZE subsize;
2582 : #if CONFIG_CB4X4
2583 : const int unify_bsize = 1;
2584 : #else
2585 : const int unify_bsize = 0;
2586 : #endif
2587 :
2588 : if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2589 :
2590 : partition = get_partition(cm, mi_row, mi_col, bsize);
2591 : subsize = get_subsize(bsize, partition);
2592 :
2593 : if (subsize < BLOCK_8X8 && !unify_bsize) {
2594 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2595 : } else {
2596 : switch (partition) {
2597 : case PARTITION_NONE:
2598 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2599 : break;
2600 : case PARTITION_HORZ:
2601 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2602 : if (mi_row + hbs < cm->mi_rows)
2603 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col);
2604 : break;
2605 : case PARTITION_VERT:
2606 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2607 : if (mi_col + hbs < cm->mi_cols)
2608 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs);
2609 : break;
2610 : case PARTITION_SPLIT:
2611 : write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
2612 : write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs,
2613 : subsize);
2614 : write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col,
2615 : subsize);
2616 : write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs,
2617 : subsize);
2618 : break;
2619 : #if CONFIG_EXT_PARTITION_TYPES
2620 : case PARTITION_HORZ_A:
2621 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2622 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs);
2623 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col);
2624 : break;
2625 : case PARTITION_HORZ_B:
2626 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2627 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col);
2628 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs);
2629 : break;
2630 : case PARTITION_VERT_A:
2631 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2632 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col);
2633 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs);
2634 : break;
2635 : case PARTITION_VERT_B:
2636 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2637 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs);
2638 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs);
2639 : break;
2640 : #endif // CONFIG_EXT_PARTITION_TYPES
2641 : default: assert(0);
2642 : }
2643 : }
2644 : }
2645 : #endif  // CONFIG_MOTION_VAR && CONFIG_NCOBMC
2646 :
2647 0 : static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
2648 : aom_writer *w, const TOKENEXTRA **tok,
2649 : const TOKENEXTRA *const tok_end,
2650 : #if CONFIG_SUPERTX
2651 : int supertx_enabled,
2652 : #endif
2653 : int mi_row, int mi_col) {
2654 0 : write_mbmi_b(cpi, tile, w,
2655 : #if CONFIG_SUPERTX
2656 : supertx_enabled,
2657 : #endif
2658 : mi_row, mi_col);
2659 : #if CONFIG_MOTION_VAR && CONFIG_NCOBMC
2660 : (void)tok;
2661 : (void)tok_end;
2662 : #else
2663 : #if !CONFIG_PVQ && CONFIG_SUPERTX
2664 : if (!supertx_enabled)
2665 : #endif
2666 0 : write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
2667 : #endif
2668 0 : }
2669 :
2670 0 : static void write_partition(const AV1_COMMON *const cm,
2671 : const MACROBLOCKD *const xd, int hbs, int mi_row,
2672 : int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
2673 : aom_writer *w) {
2674 0 : const int has_rows = (mi_row + hbs) < cm->mi_rows;
2675 0 : const int has_cols = (mi_col + hbs) < cm->mi_cols;
2676 0 : const int is_partition_point = bsize >= BLOCK_8X8;
2677 0 : const int ctx = is_partition_point
2678 0 : ? partition_plane_context(xd, mi_row, mi_col,
2679 : #if CONFIG_UNPOISON_PARTITION_CTX
2680 : has_rows, has_cols,
2681 : #endif
2682 : bsize)
2683 0 : : 0;
2684 : #if CONFIG_UNPOISON_PARTITION_CTX
2685 : const aom_prob *const probs =
2686 : ctx < PARTITION_CONTEXTS ? cm->fc->partition_prob[ctx] : NULL;
2687 : #else
2688 0 : const aom_prob *const probs = cm->fc->partition_prob[ctx];
2689 : #endif
2690 :
2691 : #if CONFIG_EC_ADAPT
2692 0 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
2693 : (void)cm;
2694 : #else
2695 : FRAME_CONTEXT *ec_ctx = cm->fc;
2696 : #endif
2697 :
2698 0 : if (!is_partition_point) return;
2699 :
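     : // At frame boundaries only the partitions that keep all coded blocks
     : // inside the frame are legal: with rows missing, the choice reduces
     : // to HORZ vs. SPLIT (one bit); with columns missing, to VERT vs.
     : // SPLIT; with both missing, SPLIT is implied and nothing is coded.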
2700 0 : if (has_rows && has_cols) {
2701 : #if CONFIG_EXT_PARTITION_TYPES
2702 : if (bsize <= BLOCK_8X8)
2703 : aom_write_symbol(w, p, ec_ctx->partition_cdf[ctx], PARTITION_TYPES);
2704 : else
2705 : aom_write_symbol(w, p, ec_ctx->partition_cdf[ctx], EXT_PARTITION_TYPES);
2706 : #else
2707 0 : aom_write_symbol(w, p, ec_ctx->partition_cdf[ctx], PARTITION_TYPES);
2708 : #endif // CONFIG_EXT_PARTITION_TYPES
2709 0 : } else if (!has_rows && has_cols) {
2710 0 : assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
2711 0 : aom_write(w, p == PARTITION_SPLIT, probs[1]);
2712 0 : } else if (has_rows && !has_cols) {
2713 0 : assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
2714 0 : aom_write(w, p == PARTITION_SPLIT, probs[2]);
2715 : } else {
2716 0 : assert(p == PARTITION_SPLIT);
2717 : }
2718 : }
2719 :
2720 : #if CONFIG_SUPERTX
2721 : #define write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
2722 : mi_row, mi_col, bsize) \
2723 : write_modes_sb(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col, \
2724 : bsize)
2725 : #else
2726 : #define write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
2727 : mi_row, mi_col, bsize) \
2728 : write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, bsize)
2729 : #endif // CONFIG_SUPERTX
2730 :
2731 0 : static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile,
2732 : aom_writer *const w, const TOKENEXTRA **tok,
2733 : const TOKENEXTRA *const tok_end,
2734 : #if CONFIG_SUPERTX
2735 : int supertx_enabled,
2736 : #endif
2737 : int mi_row, int mi_col, BLOCK_SIZE bsize) {
2738 0 : const AV1_COMMON *const cm = &cpi->common;
2739 0 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
2740 0 : const int hbs = mi_size_wide[bsize] / 2;
2741 0 : const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
2742 0 : const BLOCK_SIZE subsize = get_subsize(bsize, partition);
2743 : #if CONFIG_CB4X4
2744 0 : const int unify_bsize = 1;
2745 : #else
2746 : const int unify_bsize = 0;
2747 : #endif
2748 :
2749 : #if CONFIG_SUPERTX
2750 : const int mi_offset = mi_row * cm->mi_stride + mi_col;
2751 : MB_MODE_INFO *mbmi;
2752 : const int pack_token = !supertx_enabled;
2753 : TX_SIZE supertx_size;
2754 : int plane;
2755 : #endif
2756 :
2757 0 : if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2758 :
2759 0 : write_partition(cm, xd, hbs, mi_row, mi_col, partition, bsize, w);
2760 : #if CONFIG_SUPERTX
2761 : mbmi = &cm->mi_grid_visible[mi_offset]->mbmi;
2762 : xd->mi = cm->mi_grid_visible + mi_offset;
2763 : set_mi_row_col(xd, tile, mi_row, mi_size_high[bsize], mi_col,
2764 : mi_size_wide[bsize],
2765 : #if CONFIG_DEPENDENT_HORZTILES
2766 : cm->dependent_horz_tiles,
2767 : #endif // CONFIG_DEPENDENT_HORZTILES
2768 : cm->mi_rows, cm->mi_cols);
2769 : if (!supertx_enabled && !frame_is_intra_only(cm) &&
2770 : partition != PARTITION_NONE && bsize <= MAX_SUPERTX_BLOCK_SIZE &&
2771 : !xd->lossless[0]) {
2772 : aom_prob prob;
2773 : supertx_size = max_txsize_lookup[bsize];
2774 : prob = cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
2775 : [supertx_size];
2776 : supertx_enabled = (xd->mi[0]->mbmi.tx_size == supertx_size);
2777 : aom_write(w, supertx_enabled, prob);
2778 : }
2779 : #endif // CONFIG_SUPERTX
2780 0 : if (subsize < BLOCK_8X8 && !unify_bsize) {
2781 0 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row,
2782 : mi_col);
2783 : } else {
2784 0 : switch (partition) {
2785 : case PARTITION_NONE:
2786 0 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2787 : mi_row, mi_col);
2788 0 : break;
2789 : case PARTITION_HORZ:
2790 0 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2791 : mi_row, mi_col);
2792 0 : if (mi_row + hbs < cm->mi_rows)
2793 0 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2794 : mi_row + hbs, mi_col);
2795 0 : break;
2796 : case PARTITION_VERT:
2797 0 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2798 : mi_row, mi_col);
2799 0 : if (mi_col + hbs < cm->mi_cols)
2800 0 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2801 : mi_row, mi_col + hbs);
2802 0 : break;
2803 : case PARTITION_SPLIT:
2804 0 : write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2805 : mi_row, mi_col, subsize);
2806 0 : write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2807 : mi_row, mi_col + hbs, subsize);
2808 0 : write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2809 : mi_row + hbs, mi_col, subsize);
2810 0 : write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2811 : mi_row + hbs, mi_col + hbs, subsize);
2812 0 : break;
2813 : #if CONFIG_EXT_PARTITION_TYPES
2814 : case PARTITION_HORZ_A:
2815 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2816 : mi_row, mi_col);
2817 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2818 : mi_row, mi_col + hbs);
2819 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2820 : mi_row + hbs, mi_col);
2821 : break;
2822 : case PARTITION_HORZ_B:
2823 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2824 : mi_row, mi_col);
2825 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2826 : mi_row + hbs, mi_col);
2827 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2828 : mi_row + hbs, mi_col + hbs);
2829 : break;
2830 : case PARTITION_VERT_A:
2831 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2832 : mi_row, mi_col);
2833 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2834 : mi_row + hbs, mi_col);
2835 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2836 : mi_row, mi_col + hbs);
2837 : break;
2838 : case PARTITION_VERT_B:
2839 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2840 : mi_row, mi_col);
2841 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2842 : mi_row, mi_col + hbs);
2843 : write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
2844 : mi_row + hbs, mi_col + hbs);
2845 : break;
2846 : #endif // CONFIG_EXT_PARTITION_TYPES
2847 0 : default: assert(0);
2848 : }
2849 : }
2850 : #if CONFIG_SUPERTX
2851 : if (partition != PARTITION_NONE && supertx_enabled && pack_token) {
2852 : int skip;
2853 : const int bsw = mi_size_wide[bsize];
2854 : const int bsh = mi_size_high[bsize];
2855 :
2856 : xd->mi = cm->mi_grid_visible + mi_offset;
2857 : supertx_size = mbmi->tx_size;
2858 : set_mi_row_col(xd, tile, mi_row, bsh, mi_col, bsw,
2859 : #if CONFIG_DEPENDENT_HORZTILES
2860 : cm->dependent_horz_tiles,
2861 : #endif // CONFIG_DEPENDENT_HORZTILES
2862 : cm->mi_rows, cm->mi_cols);
2863 :
2864 : assert(IMPLIES(!cm->seg.enabled, mbmi->segment_id_supertx == 0));
2865 : assert(mbmi->segment_id_supertx < MAX_SEGMENTS);
2866 :
2867 : skip = write_skip(cm, xd, mbmi->segment_id_supertx, xd->mi[0], w);
2868 : #if CONFIG_EXT_TX
2869 : if (get_ext_tx_types(supertx_size, bsize, 1, cm->reduced_tx_set_used) > 1 &&
2870 : !skip) {
2871 : const int eset =
2872 : get_ext_tx_set(supertx_size, bsize, 1, cm->reduced_tx_set_used);
2873 : if (eset > 0) {
2874 : #if CONFIG_EC_ADAPT
2875 : FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
2876 : #else
2877 : FRAME_CONTEXT *ec_ctx = cm->fc;
2878 : #endif
2879 : aom_write_symbol(w, av1_ext_tx_inter_ind[eset][mbmi->tx_type],
2880 : ec_ctx->inter_ext_tx_cdf[eset][supertx_size],
2881 : ext_tx_cnt_inter[eset]);
2882 : }
2883 : }
2884 : #else
2885 : if (supertx_size < TX_32X32 && !skip) {
2886 : av1_write_token(w, av1_ext_tx_tree,
2887 : cm->fc->inter_ext_tx_prob[supertx_size],
2888 : &ext_tx_encodings[mbmi->tx_type]);
2889 : }
2890 : #endif // CONFIG_EXT_TX
2891 :
2892 : if (!skip) {
2893 : assert(*tok < tok_end);
2894 : for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
2895 : const struct macroblockd_plane *const pd = &xd->plane[plane];
2896 : const int mbmi_txb_size = txsize_to_bsize[mbmi->tx_size];
2897 : const BLOCK_SIZE plane_bsize = get_plane_block_size(mbmi_txb_size, pd);
2898 :
2899 : const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
2900 : const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
2901 :
2902 : int row, col;
2903 : TX_SIZE tx = get_tx_size(plane, xd);
2904 : BLOCK_SIZE txb_size = txsize_to_bsize[tx];
2905 :
2906 : const int stepr = tx_size_high_unit[txb_size];
2907 : const int stepc = tx_size_wide_unit[txb_size];
2908 :
2909 : TOKEN_STATS token_stats;
2910 : token_stats.cost = 0;
2911 : for (row = 0; row < max_blocks_high; row += stepr)
2912 : for (col = 0; col < max_blocks_wide; col += stepc)
2913 : pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx, &token_stats);
2914 : assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
2915 : (*tok)++;
2916 : }
2917 : }
2918 : #if CONFIG_VAR_TX
2919 : xd->above_txfm_context = cm->above_txfm_context + mi_col;
2920 : xd->left_txfm_context =
2921 : xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
2922 : set_txfm_ctxs(xd->mi[0]->mbmi.tx_size, bsw, bsh, skip, xd);
2923 : #endif
2924 : }
2925 : #endif // CONFIG_SUPERTX
2926 :
2927 : // update partition context
2928 : #if CONFIG_EXT_PARTITION_TYPES
2929 : update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
2930 : #else
2931 0 : if (bsize >= BLOCK_8X8 &&
2932 0 : (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
2933 0 : update_partition_context(xd, mi_row, mi_col, subsize, bsize);
2934 : #endif // CONFIG_EXT_PARTITION_TYPES
2935 :
2936 : #if CONFIG_CDEF
2937 0 : if (bsize == cm->sb_size && !sb_all_skip(cm, mi_row, mi_col) &&
2938 0 : cm->cdef_bits != 0) {
2939 0 : aom_write_literal(w, cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]
2940 0 : ->mbmi.cdef_strength,
2941 : cm->cdef_bits);
2942 : }
2943 : #endif
2944 : }
2945 :
2946 0 : static void write_modes(AV1_COMP *const cpi, const TileInfo *const tile,
2947 : aom_writer *const w, const TOKENEXTRA **tok,
2948 : const TOKENEXTRA *const tok_end) {
2949 0 : AV1_COMMON *const cm = &cpi->common;
2950 0 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
2951 0 : const int mi_row_start = tile->mi_row_start;
2952 0 : const int mi_row_end = tile->mi_row_end;
2953 0 : const int mi_col_start = tile->mi_col_start;
2954 0 : const int mi_col_end = tile->mi_col_end;
2955 : int mi_row, mi_col;
2956 :
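     : // The above context is cleared at the start of each tile (unless
     : // dependent horizontal tiles carry it across) and the left context
     : // at the start of each superblock row, matching the raster scan over
     : // superblocks below.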
2957 : #if CONFIG_DEPENDENT_HORZTILES
2958 : #if CONFIG_TILE_GROUPS
2959 : if (!cm->dependent_horz_tiles || mi_row_start == 0 ||
2960 : tile->tg_horz_boundary) {
2961 : #else
2962 : if (!cm->dependent_horz_tiles || mi_row_start == 0) {
2963 : #endif
2964 : av1_zero_above_context(cm, mi_col_start, mi_col_end);
2965 : }
2966 : #else
2967 0 : av1_zero_above_context(cm, mi_col_start, mi_col_end);
2968 : #endif
2969 : #if CONFIG_PVQ
2970 : assert(cpi->td.mb.pvq_q->curr_pos == 0);
2971 : #endif
2972 : #if CONFIG_DELTA_Q
2973 0 : if (cpi->common.delta_q_present_flag) {
2974 0 : xd->prev_qindex = cpi->common.base_qindex;
2975 : #if CONFIG_EXT_DELTA_Q
2976 0 : if (cpi->common.delta_lf_present_flag) {
2977 0 : xd->prev_delta_lf_from_base = 0;
2978 : }
2979 : #endif // CONFIG_EXT_DELTA_Q
2980 : }
2981 : #endif
2982 :
2983 0 : for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += cm->mib_size) {
2984 0 : av1_zero_left_context(xd);
2985 :
2986 0 : for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += cm->mib_size) {
2987 0 : write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, 0, mi_row, mi_col,
2988 : cm->sb_size);
2989 : #if CONFIG_MOTION_VAR && CONFIG_NCOBMC
2990 : write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, cm->sb_size);
2991 : #endif
2992 : }
2993 : }
2994 : #if CONFIG_PVQ
2995 : // Check that the number of PVQ blocks encoded matches the number
2996 : // written to the bitstream.
2997 : assert(cpi->td.mb.pvq_q->curr_pos == cpi->td.mb.pvq_q->last_pos);
2998 : // Reset curr_pos in case we repack the bitstream
2999 : cpi->td.mb.pvq_q->curr_pos = 0;
3000 : #endif
3001 0 : }
3002 :
3003 : #if !CONFIG_LV_MAP
3004 : #if !CONFIG_PVQ && !CONFIG_EC_ADAPT
3005 : static void build_tree_distribution(AV1_COMP *cpi, TX_SIZE tx_size,
3006 : av1_coeff_stats *coef_branch_ct,
3007 : av1_coeff_probs_model *coef_probs) {
3008 : av1_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
3009 : unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
3010 : cpi->common.counts.eob_branch[tx_size];
3011 : int i, j, k, l, m;
3012 : #if CONFIG_RECT_TX
3013 : assert(!is_rect_tx(tx_size));
3014 : #endif // CONFIG_RECT_TX
3015 :
3016 : for (i = 0; i < PLANE_TYPES; ++i) {
3017 : for (j = 0; j < REF_TYPES; ++j) {
3018 : for (k = 0; k < COEF_BANDS; ++k) {
3019 : for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
3020 : av1_tree_probs_from_distribution(av1_coef_tree,
3021 : coef_branch_ct[i][j][k][l],
3022 : coef_counts[i][j][k][l]);
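     : // Node 0 of the coefficient tree (the EOB decision) is not fully
     : // covered by the token counts: its "more coefficients" count is
     : // recovered from the separately tracked eob_branch totals.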
3023 : coef_branch_ct[i][j][k][l][0][1] =
3024 : eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
3025 : for (m = 0; m < UNCONSTRAINED_NODES; ++m)
3026 : coef_probs[i][j][k][l][m] =
3027 : get_binary_prob(coef_branch_ct[i][j][k][l][m][0],
3028 : coef_branch_ct[i][j][k][l][m][1]);
3029 : }
3030 : }
3031 : }
3032 : }
3033 : }
3034 :
3035 : #if !CONFIG_EC_ADAPT
3036 : static void update_coef_probs_common(aom_writer *const bc, AV1_COMP *cpi,
3037 : TX_SIZE tx_size,
3038 : av1_coeff_stats *frame_branch_ct,
3039 : av1_coeff_probs_model *new_coef_probs) {
3040 : av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
3041 : const aom_prob upd = DIFF_UPDATE_PROB;
3042 : #if CONFIG_EC_ADAPT
3043 : const int entropy_nodes_update = UNCONSTRAINED_NODES - 1;
3044 : #else
3045 : const int entropy_nodes_update = UNCONSTRAINED_NODES;
3046 : #endif
3047 : int i, j, k, l, t;
3048 : int stepsize = cpi->sf.coeff_prob_appx_step;
3049 : #if CONFIG_TILE_GROUPS
3050 : const int probwt = cpi->common.num_tg;
3051 : #else
3052 : const int probwt = 1;
3053 : #endif
3054 : #if CONFIG_RECT_TX
3055 : assert(!is_rect_tx(tx_size));
3056 : #endif // CONFIG_RECT_TX
3057 :
3058 : switch (cpi->sf.use_fast_coef_updates) {
3059 : case TWO_LOOP: {
3060 : /* dry run to see whether any update is needed at all */
3061 : int savings = 0;
3062 : int update[2] = { 0, 0 };
3063 : for (i = 0; i < PLANE_TYPES; ++i) {
3064 : for (j = 0; j < REF_TYPES; ++j) {
3065 : for (k = 0; k < COEF_BANDS; ++k) {
3066 : for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
3067 : for (t = 0; t < entropy_nodes_update; ++t) {
3068 : aom_prob newp = new_coef_probs[i][j][k][l][t];
3069 : const aom_prob oldp = old_coef_probs[i][j][k][l][t];
3070 : int s;
3071 : int u = 0;
3072 : if (t == PIVOT_NODE)
3073 : s = av1_prob_diff_update_savings_search_model(
3074 : frame_branch_ct[i][j][k][l][0], oldp, &newp, upd,
3075 : stepsize, probwt);
3076 : else
3077 : s = av1_prob_diff_update_savings_search(
3078 : frame_branch_ct[i][j][k][l][t], oldp, &newp, upd, probwt);
3079 :
3080 : if (s > 0 && newp != oldp) u = 1;
3081 : if (u)
3082 : savings += s - (int)(av1_cost_zero(upd));
3083 : else
3084 : savings -= (int)(av1_cost_zero(upd));
3085 : update[u]++;
3086 : }
3087 : }
3088 : }
3089 : }
3090 : }
3091 :
3092 : /* Was any coef updated at all? */
3093 : if (update[1] == 0 || savings < 0) {
3094 : aom_write_bit(bc, 0);
3095 : return;
3096 : }
3097 : aom_write_bit(bc, 1);
3098 : for (i = 0; i < PLANE_TYPES; ++i) {
3099 : for (j = 0; j < REF_TYPES; ++j) {
3100 : for (k = 0; k < COEF_BANDS; ++k) {
3101 : for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
3102 : // calc probs and branch cts for this frame only
3103 : for (t = 0; t < entropy_nodes_update; ++t) {
3104 : aom_prob newp = new_coef_probs[i][j][k][l][t];
3105 : aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
3106 : int s;
3107 : int u = 0;
3108 : if (t == PIVOT_NODE)
3109 : s = av1_prob_diff_update_savings_search_model(
3110 : frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd,
3111 : stepsize, probwt);
3112 : else
3113 : s = av1_prob_diff_update_savings_search(
3114 : frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd,
3115 : probwt);
3116 : if (s > 0 && newp != *oldp) u = 1;
3117 : aom_write(bc, u, upd);
3118 : if (u) {
3119 : /* send/use new probability */
3120 : av1_write_prob_diff_update(bc, newp, *oldp);
3121 : *oldp = newp;
3122 : }
3123 : }
3124 : }
3125 : }
3126 : }
3127 : }
3128 : return;
3129 : }
3130 :
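     : // ONE_LOOP_REDUCED defers the frame-level update bit: the zero flags
     : // for nodes visited before the first real update are back-filled only
     : // once an update is found, so a frame with no updates costs one 0 bit.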
3131 : case ONE_LOOP_REDUCED: {
3132 : int updates = 0;
3133 : int noupdates_before_first = 0;
3134 : for (i = 0; i < PLANE_TYPES; ++i) {
3135 : for (j = 0; j < REF_TYPES; ++j) {
3136 : for (k = 0; k < COEF_BANDS; ++k) {
3137 : for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
3138 : // calc probs and branch cts for this frame only
3139 : for (t = 0; t < entropy_nodes_update; ++t) {
3140 : aom_prob newp = new_coef_probs[i][j][k][l][t];
3141 : aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
3142 : int s;
3143 : int u = 0;
3144 : if (t == PIVOT_NODE) {
3145 : s = av1_prob_diff_update_savings_search_model(
3146 : frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd,
3147 : stepsize, probwt);
3148 : } else {
3149 : s = av1_prob_diff_update_savings_search(
3150 : frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd,
3151 : probwt);
3152 : }
3153 :
3154 : if (s > 0 && newp != *oldp) u = 1;
3155 : updates += u;
3156 : if (u == 0 && updates == 0) {
3157 : noupdates_before_first++;
3158 : continue;
3159 : }
3160 : if (u == 1 && updates == 1) {
3161 : int v;
3162 : // first update
3163 : aom_write_bit(bc, 1);
3164 : for (v = 0; v < noupdates_before_first; ++v)
3165 : aom_write(bc, 0, upd);
3166 : }
3167 : aom_write(bc, u, upd);
3168 : if (u) {
3169 : /* send/use new probability */
3170 : av1_write_prob_diff_update(bc, newp, *oldp);
3171 : *oldp = newp;
3172 : }
3173 : }
3174 : }
3175 : }
3176 : }
3177 : }
3178 : if (updates == 0) {
3179 : aom_write_bit(bc, 0); // no updates
3180 : }
3181 : return;
3182 : }
3183 : default: assert(0);
3184 : }
3185 : }
3186 : #endif
3187 :
3188 : #if !CONFIG_EC_ADAPT
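     : // For each transform size, a single 0 bit (no update) is sent when its
     : // per-frame usage count is too low (<= 20) to give reliable statistics,
     : // among other skip conditions.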
3189 : static void update_coef_probs(AV1_COMP *cpi, aom_writer *w) {
3190 : const TX_MODE tx_mode = cpi->common.tx_mode;
3191 : const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
3192 : TX_SIZE tx_size;
3193 :
3194 : for (tx_size = 0; tx_size <= max_tx_size; ++tx_size) {
3195 : av1_coeff_stats frame_branch_ct[PLANE_TYPES];
3196 : av1_coeff_probs_model frame_coef_probs[PLANE_TYPES];
3197 : if (cpi->td.counts->tx_size_totals[tx_size] <= 20 || CONFIG_RD_DEBUG ||
3198 : (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
3199 : aom_write_bit(w, 0);
3200 : } else {
3201 : build_tree_distribution(cpi, tx_size, frame_branch_ct, frame_coef_probs);
3202 : update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
3203 : frame_coef_probs);
3204 : }
3205 : }
3206 : }
3207 : #endif // !CONFIG_EC_ADAPT
3208 : #endif // !CONFIG_PVQ && !CONFIG_EC_ADAPT
3209 : #endif // !CONFIG_LV_MAP
3210 :
3211 : #if CONFIG_LOOP_RESTORATION
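     : // Bit mapping used below for the plane-0 frame restoration type:
     : // RESTORE_NONE (0,0), RESTORE_WIENER (1,0), RESTORE_SGRPROJ (1,1),
     : // RESTORE_SWITCHABLE (0,1). Chroma planes send one bit for NONE and,
     : // when restoration is on, a second bit to pick WIENER vs SGRPROJ.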
3212 : static void encode_restoration_mode(AV1_COMMON *cm,
3213 : struct aom_write_bit_buffer *wb) {
3214 : int p;
3215 : RestorationInfo *rsi = &cm->rst_info[0];
3216 : switch (rsi->frame_restoration_type) {
3217 : case RESTORE_NONE:
3218 : aom_wb_write_bit(wb, 0);
3219 : aom_wb_write_bit(wb, 0);
3220 : break;
3221 : case RESTORE_WIENER:
3222 : aom_wb_write_bit(wb, 1);
3223 : aom_wb_write_bit(wb, 0);
3224 : break;
3225 : case RESTORE_SGRPROJ:
3226 : aom_wb_write_bit(wb, 1);
3227 : aom_wb_write_bit(wb, 1);
3228 : break;
3229 : case RESTORE_SWITCHABLE:
3230 : aom_wb_write_bit(wb, 0);
3231 : aom_wb_write_bit(wb, 1);
3232 : break;
3233 : default: assert(0);
3234 : }
3235 : for (p = 1; p < MAX_MB_PLANE; ++p) {
3236 : rsi = &cm->rst_info[p];
3237 : switch (rsi->frame_restoration_type) {
3238 : case RESTORE_NONE: aom_wb_write_bit(wb, 0); break;
3239 : case RESTORE_WIENER:
3240 : aom_wb_write_bit(wb, 1);
3241 : aom_wb_write_bit(wb, 0);
3242 : break;
3243 : case RESTORE_SGRPROJ:
3244 : aom_wb_write_bit(wb, 1);
3245 : aom_wb_write_bit(wb, 1);
3246 : break;
3247 : default: assert(0);
3248 : }
3249 : }
3250 : if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
3251 : cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
3252 : cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
3253 : rsi = &cm->rst_info[0];
3254 : aom_wb_write_bit(wb, rsi->restoration_tilesize != RESTORATION_TILESIZE_MAX);
3255 : if (rsi->restoration_tilesize != RESTORATION_TILESIZE_MAX) {
3256 : aom_wb_write_bit(
3257 : wb, rsi->restoration_tilesize != (RESTORATION_TILESIZE_MAX >> 1));
3258 : }
3259 : }
3260 : }
3261 :
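     : // Each Wiener tap is coded as a finite subexponential difference from
     : // the corresponding tap of the previously written filter; the
     : // reference is then advanced to the filter just coded (memcpy below).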
3262 : static void write_wiener_filter(WienerInfo *wiener_info,
3263 : WienerInfo *ref_wiener_info, aom_writer *wb) {
3264 : aom_write_primitive_refsubexpfin(
3265 : wb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
3266 : WIENER_FILT_TAP0_SUBEXP_K,
3267 : ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV,
3268 : wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV);
3269 : aom_write_primitive_refsubexpfin(
3270 : wb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
3271 : WIENER_FILT_TAP1_SUBEXP_K,
3272 : ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV,
3273 : wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV);
3274 : aom_write_primitive_refsubexpfin(
3275 : wb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
3276 : WIENER_FILT_TAP2_SUBEXP_K,
3277 : ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV,
3278 : wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV);
3279 : aom_write_primitive_refsubexpfin(
3280 : wb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
3281 : WIENER_FILT_TAP0_SUBEXP_K,
3282 : ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV,
3283 : wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV);
3284 : aom_write_primitive_refsubexpfin(
3285 : wb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
3286 : WIENER_FILT_TAP1_SUBEXP_K,
3287 : ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV,
3288 : wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV);
3289 : aom_write_primitive_refsubexpfin(
3290 : wb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
3291 : WIENER_FILT_TAP2_SUBEXP_K,
3292 : ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV,
3293 : wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV);
3294 : memcpy(ref_wiener_info, wiener_info, sizeof(*wiener_info));
3295 : }
3296 :
3297 : static void write_sgrproj_filter(SgrprojInfo *sgrproj_info,
3298 : SgrprojInfo *ref_sgrproj_info,
3299 : aom_writer *wb) {
3300 : aom_write_literal(wb, sgrproj_info->ep, SGRPROJ_PARAMS_BITS);
3301 : aom_write_primitive_refsubexpfin(wb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1,
3302 : SGRPROJ_PRJ_SUBEXP_K,
3303 : ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0,
3304 : sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0);
3305 : aom_write_primitive_refsubexpfin(wb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1,
3306 : SGRPROJ_PRJ_SUBEXP_K,
3307 : ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1,
3308 : sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1);
3309 : memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info));
3310 : }
3311 :
3312 : static void encode_restoration(AV1_COMMON *cm, aom_writer *wb) {
3313 : int i, p;
3314 : const int ntiles = av1_get_rest_ntiles(cm->width, cm->height,
3315 : cm->rst_info[0].restoration_tilesize,
3316 : NULL, NULL, NULL, NULL);
3317 : WienerInfo ref_wiener_info;
3318 : SgrprojInfo ref_sgrproj_info;
3319 : set_default_wiener(&ref_wiener_info);
3320 : set_default_sgrproj(&ref_sgrproj_info);
3321 : const int ntiles_uv = av1_get_rest_ntiles(
3322 : ROUND_POWER_OF_TWO(cm->width, cm->subsampling_x),
3323 : ROUND_POWER_OF_TWO(cm->height, cm->subsampling_y),
3324 : cm->rst_info[1].restoration_tilesize, NULL, NULL, NULL, NULL);
3325 : RestorationInfo *rsi = &cm->rst_info[0];
3326 : if (rsi->frame_restoration_type != RESTORE_NONE) {
3327 : if (rsi->frame_restoration_type == RESTORE_SWITCHABLE) {
3328 : // RESTORE_SWITCHABLE
3329 : for (i = 0; i < ntiles; ++i) {
3330 : av1_write_token(
3331 : wb, av1_switchable_restore_tree, cm->fc->switchable_restore_prob,
3332 : &switchable_restore_encodings[rsi->restoration_type[i]]);
3333 : if (rsi->restoration_type[i] == RESTORE_WIENER) {
3334 : write_wiener_filter(&rsi->wiener_info[i], &ref_wiener_info, wb);
3335 : } else if (rsi->restoration_type[i] == RESTORE_SGRPROJ) {
3336 : write_sgrproj_filter(&rsi->sgrproj_info[i], &ref_sgrproj_info, wb);
3337 : }
3338 : }
3339 : } else if (rsi->frame_restoration_type == RESTORE_WIENER) {
3340 : for (i = 0; i < ntiles; ++i) {
3341 : aom_write(wb, rsi->restoration_type[i] != RESTORE_NONE,
3342 : RESTORE_NONE_WIENER_PROB);
3343 : if (rsi->restoration_type[i] != RESTORE_NONE) {
3344 : write_wiener_filter(&rsi->wiener_info[i], &ref_wiener_info, wb);
3345 : }
3346 : }
3347 : } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
3348 : for (i = 0; i < ntiles; ++i) {
3349 : aom_write(wb, rsi->restoration_type[i] != RESTORE_NONE,
3350 : RESTORE_NONE_SGRPROJ_PROB);
3351 : if (rsi->restoration_type[i] != RESTORE_NONE) {
3352 : write_sgrproj_filter(&rsi->sgrproj_info[i], &ref_sgrproj_info, wb);
3353 : }
3354 : }
3355 : }
3356 : }
3357 : for (p = 1; p < MAX_MB_PLANE; ++p) {
3358 : set_default_wiener(&ref_wiener_info);
3359 : set_default_sgrproj(&ref_sgrproj_info);
3360 : rsi = &cm->rst_info[p];
3361 : if (rsi->frame_restoration_type == RESTORE_WIENER) {
3362 : for (i = 0; i < ntiles_uv; ++i) {
3363 : if (ntiles_uv > 1)
3364 : aom_write(wb, rsi->restoration_type[i] != RESTORE_NONE,
3365 : RESTORE_NONE_WIENER_PROB);
3366 : if (rsi->restoration_type[i] != RESTORE_NONE) {
3367 : write_wiener_filter(&rsi->wiener_info[i], &ref_wiener_info, wb);
3368 : }
3369 : }
3370 : } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
3371 : for (i = 0; i < ntiles_uv; ++i) {
3372 : if (ntiles_uv > 1)
3373 : aom_write(wb, rsi->restoration_type[i] != RESTORE_NONE,
3374 : RESTORE_NONE_SGRPROJ_PROB);
3375 : if (rsi->restoration_type[i] != RESTORE_NONE) {
3376 : write_sgrproj_filter(&rsi->sgrproj_info[i], &ref_sgrproj_info, wb);
3377 : }
3378 : }
3379 : } else if (rsi->frame_restoration_type != RESTORE_NONE) {
3380 : assert(0);
3381 : }
3382 : }
3383 : }
3384 : #endif // CONFIG_LOOP_RESTORATION
3385 :
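     : // Loop filter syntax: a 6-bit filter level and a 3-bit sharpness,
     : // then optional per-reference and per-mode deltas, each written only
     : // when it differs from the previously signaled value.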
3386 0 : static void encode_loopfilter(AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
3387 : int i;
3388 0 : struct loopfilter *lf = &cm->lf;
3389 :
3390 : // Encode the loop filter level and type
3391 0 : aom_wb_write_literal(wb, lf->filter_level, 6);
3392 0 : aom_wb_write_literal(wb, lf->sharpness_level, 3);
3393 :
3394 : // Write out loop filter deltas applied at the MB level based on mode or
3395 : // ref frame (if they are enabled).
3396 0 : aom_wb_write_bit(wb, lf->mode_ref_delta_enabled);
3397 :
3398 0 : if (lf->mode_ref_delta_enabled) {
3399 0 : aom_wb_write_bit(wb, lf->mode_ref_delta_update);
3400 0 : if (lf->mode_ref_delta_update) {
3401 0 : for (i = 0; i < TOTAL_REFS_PER_FRAME; i++) {
3402 0 : const int delta = lf->ref_deltas[i];
3403 0 : const int changed = delta != lf->last_ref_deltas[i];
3404 0 : aom_wb_write_bit(wb, changed);
3405 0 : if (changed) {
3406 0 : lf->last_ref_deltas[i] = delta;
3407 0 : aom_wb_write_inv_signed_literal(wb, delta, 6);
3408 : }
3409 : }
3410 :
3411 0 : for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
3412 0 : const int delta = lf->mode_deltas[i];
3413 0 : const int changed = delta != lf->last_mode_deltas[i];
3414 0 : aom_wb_write_bit(wb, changed);
3415 0 : if (changed) {
3416 0 : lf->last_mode_deltas[i] = delta;
3417 0 : aom_wb_write_inv_signed_literal(wb, delta, 6);
3418 : }
3419 : }
3420 : }
3421 : }
3422 0 : }
3423 :
3424 : #if CONFIG_CDEF
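     : // CDEF damping values are coded as offsets from their minimums (5 for
     : // dering, 3 for CLPF), followed by the 2-bit cdef_bits and a
     : // luma/chroma strength pair for each of the nb_cdef_strengths presets.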
3425 0 : static void encode_cdef(const AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
3426 : int i;
3427 0 : aom_wb_write_literal(wb, cm->cdef_dering_damping - 5, 1);
3428 0 : aom_wb_write_literal(wb, cm->cdef_clpf_damping - 3, 2);
3429 0 : aom_wb_write_literal(wb, cm->cdef_bits, 2);
3430 0 : for (i = 0; i < cm->nb_cdef_strengths; i++) {
3431 0 : aom_wb_write_literal(wb, cm->cdef_strengths[i], CDEF_STRENGTH_BITS);
3432 0 : aom_wb_write_literal(wb, cm->cdef_uv_strengths[i], CDEF_STRENGTH_BITS);
3433 : }
3434 0 : }
3435 : #endif
3436 :
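     : // A zero delta costs a single 0 bit; a nonzero delta (e.g. -3) costs
     : // a 1 bit plus a 6-bit inverted-sign literal.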
3437 0 : static void write_delta_q(struct aom_write_bit_buffer *wb, int delta_q) {
3438 0 : if (delta_q != 0) {
3439 0 : aom_wb_write_bit(wb, 1);
3440 0 : aom_wb_write_inv_signed_literal(wb, delta_q, 6);
3441 : } else {
3442 0 : aom_wb_write_bit(wb, 0);
3443 : }
3444 0 : }
3445 :
3446 0 : static void encode_quantization(const AV1_COMMON *const cm,
3447 : struct aom_write_bit_buffer *wb) {
3448 0 : aom_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
3449 0 : write_delta_q(wb, cm->y_dc_delta_q);
3450 0 : write_delta_q(wb, cm->uv_dc_delta_q);
3451 0 : write_delta_q(wb, cm->uv_ac_delta_q);
3452 : #if CONFIG_AOM_QM
3453 : aom_wb_write_bit(wb, cm->using_qmatrix);
3454 : if (cm->using_qmatrix) {
3455 : aom_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
3456 : aom_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
3457 : }
3458 : #endif
3459 0 : }
3460 :
3461 0 : static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
3462 : struct aom_write_bit_buffer *wb) {
3463 : int i, j;
3464 0 : const struct segmentation *seg = &cm->seg;
3465 :
3466 0 : aom_wb_write_bit(wb, seg->enabled);
3467 0 : if (!seg->enabled) return;
3468 :
3469 : // Segmentation map
3470 0 : if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
3471 0 : aom_wb_write_bit(wb, seg->update_map);
3472 : } else {
3473 0 : assert(seg->update_map == 1);
3474 : }
3475 0 : if (seg->update_map) {
3476 : // Select the coding strategy (temporal or spatial)
3477 0 : av1_choose_segmap_coding_method(cm, xd);
3478 :
3479 : // Write out the chosen coding method.
3480 0 : if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
3481 0 : aom_wb_write_bit(wb, seg->temporal_update);
3482 : } else {
3483 0 : assert(seg->temporal_update == 0);
3484 : }
3485 : }
3486 :
3487 : // Segmentation data
3488 0 : aom_wb_write_bit(wb, seg->update_data);
3489 0 : if (seg->update_data) {
3490 0 : aom_wb_write_bit(wb, seg->abs_delta);
3491 :
3492 0 : for (i = 0; i < MAX_SEGMENTS; i++) {
3493 0 : for (j = 0; j < SEG_LVL_MAX; j++) {
3494 0 : const int active = segfeature_active(seg, i, j);
3495 0 : aom_wb_write_bit(wb, active);
3496 0 : if (active) {
3497 0 : const int data = get_segdata(seg, i, j);
3498 0 : const int data_max = av1_seg_feature_data_max(j);
3499 :
3500 0 : if (av1_is_segfeature_signed(j)) {
3501 0 : encode_unsigned_max(wb, abs(data), data_max);
3502 0 : aom_wb_write_bit(wb, data < 0);
3503 : } else {
3504 0 : encode_unsigned_max(wb, data, data_max);
3505 : }
3506 : }
3507 : }
3508 : }
3509 : }
3510 : }
3511 :
3512 : #if !CONFIG_EC_ADAPT
3513 : static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
3514 : AV1_COMMON *cm = &cpi->common;
3515 : #if CONFIG_TILE_GROUPS
3516 : const int probwt = cm->num_tg;
3517 : #else
3518 : const int probwt = 1;
3519 : #endif
3520 :
3521 : if (!cm->seg.enabled || !cm->seg.update_map) return;
3522 :
3523 : if (cm->seg.temporal_update) {
3524 : int i;
3525 :
3526 : for (i = 0; i < PREDICTION_PROBS; i++)
3527 : av1_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
3528 : cm->counts.seg.pred[i], probwt);
3529 :
3530 : prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
3531 : cm->counts.seg.tree_mispred, MAX_SEGMENTS, probwt, w);
3532 : } else {
3533 : prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
3534 : cm->counts.seg.tree_total, MAX_SEGMENTS, probwt, w);
3535 : }
3536 : }
3537 : #endif
3538 :
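     : // When every active segment is lossless the TX mode is forced to
     : // ONLY_4X4 and nothing is written; otherwise a select bit is sent,
     : // followed by a 2-bit literal when the mode is fixed (plus one extra
     : // bit for 64x64 under CONFIG_TX64X64).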
3539 0 : static void write_tx_mode(AV1_COMMON *cm, MACROBLOCKD *xd, TX_MODE *mode,
3540 : struct aom_write_bit_buffer *wb) {
3541 0 : int i, all_lossless = 1;
3542 :
3543 0 : if (cm->seg.enabled) {
3544 0 : for (i = 0; i < MAX_SEGMENTS; ++i) {
3545 0 : if (!xd->lossless[i]) {
3546 0 : all_lossless = 0;
3547 0 : break;
3548 : }
3549 : }
3550 : } else {
3551 0 : all_lossless = xd->lossless[0];
3552 : }
3553 0 : if (all_lossless) {
3554 0 : *mode = ONLY_4X4;
3555 0 : return;
3556 : }
3557 : #if CONFIG_TX64X64
3558 : aom_wb_write_bit(wb, *mode == TX_MODE_SELECT);
3559 : if (*mode != TX_MODE_SELECT) {
3560 : aom_wb_write_literal(wb, AOMMIN(*mode, ALLOW_32X32), 2);
3561 : if (*mode >= ALLOW_32X32) aom_wb_write_bit(wb, *mode == ALLOW_64X64);
3562 : }
3563 : #else
3564 0 : aom_wb_write_bit(wb, *mode == TX_MODE_SELECT);
3565 0 : if (*mode != TX_MODE_SELECT) aom_wb_write_literal(wb, *mode, 2);
3566 : #endif // CONFIG_TX64X64
3567 : }
3568 :
3569 : #if !CONFIG_EC_ADAPT
3570 : static void update_txfm_probs(AV1_COMMON *cm, aom_writer *w,
3571 : FRAME_COUNTS *counts) {
3572 : #if CONFIG_TILE_GROUPS
3573 : const int probwt = cm->num_tg;
3574 : #else
3575 : const int probwt = 1;
3576 : #endif
3577 : if (cm->tx_mode == TX_MODE_SELECT) {
3578 : int i, j;
3579 : for (i = 0; i < MAX_TX_DEPTH; ++i)
3580 : for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
3581 : prob_diff_update(av1_tx_size_tree[i], cm->fc->tx_size_probs[i][j],
3582 : counts->tx_size[i][j], i + 2, probwt, w);
3583 : }
3584 : }
3585 : #endif
3586 :
3587 0 : static void write_frame_interp_filter(InterpFilter filter,
3588 : struct aom_write_bit_buffer *wb) {
3589 0 : aom_wb_write_bit(wb, filter == SWITCHABLE);
3590 0 : if (filter != SWITCHABLE)
3591 0 : aom_wb_write_literal(wb, filter, LOG_SWITCHABLE_FILTERS);
3592 0 : }
3593 :
3594 0 : static void fix_interp_filter(AV1_COMMON *cm, FRAME_COUNTS *counts) {
3595 0 : if (cm->interp_filter == SWITCHABLE) {
3596 : // Check to see if only one of the filters is actually used
3597 : int count[SWITCHABLE_FILTERS];
3598 0 : int i, j, c = 0;
3599 0 : for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
3600 0 : count[i] = 0;
3601 0 : for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
3602 0 : count[i] += counts->switchable_interp[j][i];
3603 0 : c += (count[i] > 0);
3604 : }
3605 0 : if (c == 1) {
3606 : // Only one filter is used. So set the filter at frame level
3607 0 : for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
3608 0 : if (count[i]) {
3609 : #if CONFIG_MOTION_VAR && (CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION)
3610 : #if CONFIG_WARPED_MOTION
3611 0 : if (i == EIGHTTAP_REGULAR || WARP_WM_NEIGHBORS_WITH_OBMC)
3612 : #else
3613 : if (i == EIGHTTAP_REGULAR || WARP_GM_NEIGHBORS_WITH_OBMC)
3614 : #endif // CONFIG_WARPED_MOTION
3615 : #endif // CONFIG_MOTION_VAR && (CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION)
3616 0 : cm->interp_filter = i;
3617 0 : break;
3618 : }
3619 : }
3620 : }
3621 : }
3622 0 : }
3623 :
3624 0 : static void write_tile_info(const AV1_COMMON *const cm,
3625 : struct aom_write_bit_buffer *wb) {
3626 : #if CONFIG_EXT_TILE
3627 : const int tile_width =
3628 : ALIGN_POWER_OF_TWO(cm->tile_width, cm->mib_size_log2) >>
3629 : cm->mib_size_log2;
3630 : const int tile_height =
3631 : ALIGN_POWER_OF_TWO(cm->tile_height, cm->mib_size_log2) >>
3632 : cm->mib_size_log2;
3633 :
3634 : assert(tile_width > 0);
3635 : assert(tile_height > 0);
3636 :
3637 : aom_wb_write_literal(wb, cm->tile_encoding_mode, 1);
3638 :
3639 : // Write the tile sizes
3640 : #if CONFIG_EXT_PARTITION
3641 : if (cm->sb_size == BLOCK_128X128) {
3642 : assert(tile_width <= 32);
3643 : assert(tile_height <= 32);
3644 : aom_wb_write_literal(wb, tile_width - 1, 5);
3645 : aom_wb_write_literal(wb, tile_height - 1, 5);
3646 : } else
3647 : #endif // CONFIG_EXT_PARTITION
3648 : {
3649 : assert(tile_width <= 64);
3650 : assert(tile_height <= 64);
3651 : aom_wb_write_literal(wb, tile_width - 1, 6);
3652 : aom_wb_write_literal(wb, tile_height - 1, 6);
3653 : }
3654 : #if CONFIG_DEPENDENT_HORZTILES
3655 : if (tile_height > 1) aom_wb_write_bit(wb, cm->dependent_horz_tiles);
3656 : #endif
3657 : #else
3658 : int min_log2_tile_cols, max_log2_tile_cols, ones;
3659 0 : av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
3660 :
3661 : // columns: (log2_tile_cols - min_log2_tile_cols) coded in unary
3662 0 : ones = cm->log2_tile_cols - min_log2_tile_cols;
3663 0 : while (ones--) aom_wb_write_bit(wb, 1);
3664 :
3665 0 : if (cm->log2_tile_cols < max_log2_tile_cols) aom_wb_write_bit(wb, 0);
3666 :
3667 : // rows
3668 0 : aom_wb_write_bit(wb, cm->log2_tile_rows != 0);
3669 0 : if (cm->log2_tile_rows != 0) aom_wb_write_bit(wb, cm->log2_tile_rows != 1);
3670 : #if CONFIG_DEPENDENT_HORZTILES
3671 : if (cm->log2_tile_rows != 0) aom_wb_write_bit(wb, cm->dependent_horz_tiles);
3672 : #endif
3673 : #endif // CONFIG_EXT_TILE
3674 :
3675 : #if CONFIG_LOOPFILTERING_ACROSS_TILES
3676 0 : aom_wb_write_bit(wb, cm->loop_filter_across_tiles_enabled);
3677 : #endif // CONFIG_LOOPFILTERING_ACROSS_TILES
3678 0 : }
3679 :
3680 0 : static int get_refresh_mask(AV1_COMP *cpi) {
3681 0 : int refresh_mask = 0;
3682 :
3683 : #if CONFIG_EXT_REFS
3684 : // NOTE(zoeliu): When LAST_FRAME is to get refreshed, the decoder will be
3685 : // notified to get LAST3_FRAME refreshed and then the virtual indexes for all
3686 : // the 3 LAST reference frames will be updated accordingly, i.e.:
3687 : // (1) The original virtual index for LAST3_FRAME will become the new virtual
3688 : // index for LAST_FRAME; and
3689 : // (2) The original virtual indexes for LAST_FRAME and LAST2_FRAME will be
3690 : // shifted and become the new virtual indexes for LAST2_FRAME and
3691 : // LAST3_FRAME.
3692 0 : refresh_mask |=
3693 0 : (cpi->refresh_last_frame << cpi->lst_fb_idxes[LAST_REF_FRAMES - 1]);
3694 0 : if (cpi->rc.is_bwd_ref_frame && cpi->num_extra_arfs) {
3695 : // We have swapped the virtual indices
3696 0 : refresh_mask |= (cpi->refresh_bwd_ref_frame << cpi->arf_map[0]);
3697 : } else {
3698 0 : refresh_mask |= (cpi->refresh_bwd_ref_frame << cpi->bwd_fb_idx);
3699 : }
3700 : #else
3701 : refresh_mask |= (cpi->refresh_last_frame << cpi->lst_fb_idx);
3702 : #endif // CONFIG_EXT_REFS
3703 :
3704 0 : if (av1_preserve_existing_gf(cpi)) {
3705 : // We have decided to preserve the previously existing golden frame as our
3706 : // new ARF frame. However, in the short term we leave it in the GF slot and,
3707 : // if we're updating the GF with the current decoded frame, we save it
3708 : // instead to the ARF slot.
3709 : // Later, in the function av1_encoder.c:av1_update_reference_frames() we
3710 : // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
3711 : // there so that it can be done outside of the recode loop.
3712 : // Note: This is highly specific to the use of ARF as a forward reference,
3713 : // and this needs to be generalized as other uses are implemented
3714 : // (like RTC/temporal scalability).
3715 0 : return refresh_mask | (cpi->refresh_golden_frame << cpi->alt_fb_idx);
3716 : } else {
3717 : #if CONFIG_EXT_REFS
3718 0 : const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
3719 0 : int arf_idx = cpi->arf_map[gf_group->arf_update_idx[gf_group->index]];
3720 : #else
3721 : int arf_idx = cpi->alt_fb_idx;
3722 : if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
3723 : const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
3724 : arf_idx = gf_group->arf_update_idx[gf_group->index];
3725 : }
3726 : #endif // CONFIG_EXT_REFS
3727 0 : return refresh_mask | (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
3728 0 : (cpi->refresh_alt_ref_frame << arf_idx);
3729 : }
3730 : }
3731 :
3732 : #if CONFIG_EXT_TILE
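     : // Looks for a previously coded tile that is byte-identical to the
     : // current one. A match is signaled by setting the top bit of the tile
     : // header and storing the row offset to the source tile in the low 7
     : // bits, which is why offsets of 128 or more are rejected.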
3733 : static INLINE int find_identical_tile(
3734 : const int tile_row, const int tile_col,
3735 : TileBufferEnc (*const tile_buffers)[1024]) {
3736 : const MV32 candidate_offset[1] = { { 1, 0 } };
3737 : const uint8_t *const cur_tile_data =
3738 : tile_buffers[tile_row][tile_col].data + 4;
3739 : const size_t cur_tile_size = tile_buffers[tile_row][tile_col].size;
3740 :
3741 : int i;
3742 :
3743 : if (tile_row == 0) return 0;
3744 :
3745 : // (TODO: yunqingwang) For now, only above tile is checked and used.
3746 : // More candidates such as left tile can be added later.
3747 : for (i = 0; i < 1; i++) {
3748 : int row_offset = candidate_offset[0].row;
3749 : int col_offset = candidate_offset[0].col;
3750 : int row = tile_row - row_offset;
3751 : int col = tile_col - col_offset;
3752 : uint8_t tile_hdr;
3753 : const uint8_t *tile_data;
3754 : TileBufferEnc *candidate;
3755 :
3756 : if (row < 0 || col < 0) continue;
3757 :
3758 : tile_hdr = *(tile_buffers[row][col].data);
3759 :
3760 : // Read out tcm bit
3761 : if ((tile_hdr >> 7) == 1) {
3762 : // The candidate is a copy tile itself
3763 : row_offset += tile_hdr & 0x7f;
3764 : row = tile_row - row_offset;
3765 : }
3766 :
3767 : candidate = &tile_buffers[row][col];
3768 :
3769 : if (row_offset >= 128 || candidate->size != cur_tile_size) continue;
3770 :
3771 : tile_data = candidate->data + 4;
3772 :
3773 : if (memcmp(tile_data, cur_tile_data, cur_tile_size) != 0) continue;
3774 :
3775 : // Identical tile found
3776 : assert(row_offset > 0);
3777 : return row_offset;
3778 : }
3779 :
3780 : // No identical tile found
3781 : return 0;
3782 : }
3783 : #endif // CONFIG_EXT_TILE
3784 :
3785 : #if CONFIG_TILE_GROUPS
3786 0 : static uint32_t write_tiles(AV1_COMP *const cpi,
3787 : struct aom_write_bit_buffer *wb,
3788 : unsigned int *max_tile_size,
3789 : unsigned int *max_tile_col_size) {
3790 : #else
3791 : static uint32_t write_tiles(AV1_COMP *const cpi, uint8_t *const dst,
3792 : unsigned int *max_tile_size,
3793 : unsigned int *max_tile_col_size) {
3794 : #endif
3795 0 : const AV1_COMMON *const cm = &cpi->common;
3796 : #if CONFIG_ANS
3797 : struct BufAnsCoder *buf_ans = &cpi->buf_ans;
3798 : #else
3799 : aom_writer mode_bc;
3800 : #endif // CONFIG_ANS
3801 : int tile_row, tile_col;
3802 0 : TOKENEXTRA *(*const tok_buffers)[MAX_TILE_COLS] = cpi->tile_tok;
3803 0 : TileBufferEnc(*const tile_buffers)[MAX_TILE_COLS] = cpi->tile_buffers;
3804 0 : uint32_t total_size = 0;
3805 0 : const int tile_cols = cm->tile_cols;
3806 0 : const int tile_rows = cm->tile_rows;
3807 0 : unsigned int tile_size = 0;
3808 : #if CONFIG_TILE_GROUPS
3809 0 : const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols;
3810 0 : const int have_tiles = n_log2_tiles > 0;
3811 : uint32_t comp_hdr_size;
3812 : // Fixed size tile groups for the moment
3813 0 : const int num_tg_hdrs = cm->num_tg;
3814 0 : const int tg_size = (tile_rows * tile_cols + num_tg_hdrs - 1) / num_tg_hdrs;
3815 0 : int tile_count = 0;
3816 0 : int tg_count = 1;
3817 0 : int tile_size_bytes = 4;
3818 : int tile_col_size_bytes;
3819 0 : uint32_t uncompressed_hdr_size = 0;
3820 0 : uint8_t *dst = NULL;
3821 : struct aom_write_bit_buffer comp_hdr_len_wb;
3822 : struct aom_write_bit_buffer tg_params_wb;
3823 : struct aom_write_bit_buffer tile_size_bytes_wb;
3824 : uint32_t saved_offset;
3825 0 : int mtu_size = cpi->oxcf.mtu;
3826 0 : int curr_tg_data_size = 0;
3827 : int hdr_size;
3828 : #endif
3829 : #if CONFIG_EXT_TILE
3830 : const int have_tiles = tile_cols * tile_rows > 1;
3831 : #endif // CONFIG_EXT_TILE
3832 :
3833 0 : *max_tile_size = 0;
3834 0 : *max_tile_col_size = 0;
3835 :
3836 : // All tile size fields are written using 4 bytes. A call to remux_tiles
3837 : // will later compact the data if smaller headers are adequate.
3838 :
3839 : #if CONFIG_EXT_TILE
3840 : for (tile_col = 0; tile_col < tile_cols; tile_col++) {
3841 : TileInfo tile_info;
3842 : const int is_last_col = (tile_col == tile_cols - 1);
3843 : const uint32_t col_offset = total_size;
3844 :
3845 : av1_tile_set_col(&tile_info, cm, tile_col);
3846 :
3847 : // The last column does not have a column header
3848 : if (!is_last_col) total_size += 4;
3849 :
3850 : for (tile_row = 0; tile_row < tile_rows; tile_row++) {
3851 : TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
3852 : const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
3853 : const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
3854 : const int data_offset = have_tiles ? 4 : 0;
3855 : #if CONFIG_EC_ADAPT
3856 : const int tile_idx = tile_row * tile_cols + tile_col;
3857 : TileDataEnc *this_tile = &cpi->tile_data[tile_idx];
3858 : #endif
3859 : av1_tile_set_row(&tile_info, cm, tile_row);
3860 :
3861 : buf->data = dst + total_size;
3862 :
3863 : // If CONFIG_EXT_TILE = 1, every tile in the row has a header,
3864 : // even for the last one, unless no tiling is used at all.
3865 : total_size += data_offset;
3866 : #if CONFIG_EC_ADAPT
3867 : // Initialise tile context from the frame context
3868 : this_tile->tctx = *cm->fc;
3869 : cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
3870 : #endif
3871 : #if CONFIG_PVQ
3872 : cpi->td.mb.pvq_q = &this_tile->pvq_q;
3873 : cpi->td.mb.daala_enc.state.adapt = &this_tile->tctx.pvq_context;
3874 : #endif // CONFIG_PVQ
3875 : #if !CONFIG_ANS
3876 : aom_start_encode(&mode_bc, buf->data + data_offset);
3877 : write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
3878 : assert(tok == tok_end);
3879 : aom_stop_encode(&mode_bc);
3880 : tile_size = mode_bc.pos;
3881 : #else
3882 : buf_ans_write_init(buf_ans, buf->data + data_offset);
3883 : write_modes(cpi, &tile_info, buf_ans, &tok, tok_end);
3884 : assert(tok == tok_end);
3885 : aom_buf_ans_flush(buf_ans);
3886 : tile_size = buf_ans_write_end(buf_ans);
3887 : #endif // !CONFIG_ANS
3888 : #if CONFIG_PVQ
3889 : cpi->td.mb.pvq_q = NULL;
3890 : #endif
3891 : buf->size = tile_size;
3892 :
3893 : // Record the maximum tile size we see, so we can compact headers later.
3894 : *max_tile_size = AOMMAX(*max_tile_size, tile_size);
3895 :
3896 : if (have_tiles) {
3897 : // tile header: size of this tile, or copy offset
3898 : uint32_t tile_header = tile_size;
3899 :
3900 : // If the tile_encoding_mode is 1 (i.e. TILE_VR), check if this tile is
3901 : // a copy tile.
3902 : // Copy tiles are very unlikely on key frames, so skip the search
3903 : // there to avoid unnecessary work.
3904 : if (cm->frame_type != KEY_FRAME && cm->tile_encoding_mode) {
3905 : const int identical_tile_offset =
3906 : find_identical_tile(tile_row, tile_col, tile_buffers);
3907 :
3908 : if (identical_tile_offset > 0) {
3909 : tile_size = 0;
3910 : tile_header = identical_tile_offset | 0x80;
3911 : tile_header <<= 24;
3912 : }
3913 : }
3914 :
3915 : mem_put_le32(buf->data, tile_header);
3916 : }
3917 :
3918 : total_size += tile_size;
3919 : }
3920 :
3921 : if (!is_last_col) {
3922 : uint32_t col_size = total_size - col_offset - 4;
3923 : mem_put_le32(dst + col_offset, col_size);
3924 :
3925 : // If this is not the final packing, record the maximum tile column size
3926 : // we see; otherwise, check whether the tile size is out of range.
3927 : *max_tile_col_size = AOMMAX(*max_tile_col_size, col_size);
3928 : }
3929 : }
3930 : #else
3931 : #if CONFIG_TILE_GROUPS
3932 0 : write_uncompressed_header(cpi, wb);
3933 :
3934 : #if CONFIG_EXT_REFS
3935 0 : if (cm->show_existing_frame) {
3936 0 : total_size = aom_wb_bytes_written(wb);
3937 0 : return (uint32_t)total_size;
3938 : }
3939 : #endif // CONFIG_EXT_REFS
3940 :
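     : // The placeholders written here (tile-size-bytes code, tile group
     : // parameters, 16-bit compressed header length) are overwritten via
     : // aom_wb_overwrite_literal() once the actual values are known.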
3941 : // Write the tile length code
3942 0 : tile_size_bytes_wb = *wb;
3943 0 : aom_wb_write_literal(wb, 3, 2);
3944 :
3945 : /* Write a placeholder for the number of tiles in each tile group */
3946 0 : tg_params_wb = *wb;
3947 0 : saved_offset = wb->bit_offset;
3948 0 : if (have_tiles) {
3949 0 : aom_wb_overwrite_literal(wb, 3, n_log2_tiles);
3950 0 : aom_wb_overwrite_literal(wb, (1 << n_log2_tiles) - 1, n_log2_tiles);
3951 : }
3952 :
3953 : /* Write a placeholder for the compressed header length */
3954 0 : comp_hdr_len_wb = *wb;
3955 0 : aom_wb_write_literal(wb, 0, 16);
3956 :
3957 0 : uncompressed_hdr_size = aom_wb_bytes_written(wb);
3958 0 : dst = wb->bit_buffer;
3959 0 : comp_hdr_size = write_compressed_header(cpi, dst + uncompressed_hdr_size);
3960 0 : aom_wb_overwrite_literal(&comp_hdr_len_wb, (int)(comp_hdr_size), 16);
3961 0 : hdr_size = uncompressed_hdr_size + comp_hdr_size;
3962 0 : total_size += hdr_size;
3963 : #endif
3964 :
3965 0 : for (tile_row = 0; tile_row < tile_rows; tile_row++) {
3966 : TileInfo tile_info;
3967 0 : const int is_last_row = (tile_row == tile_rows - 1);
3968 0 : av1_tile_set_row(&tile_info, cm, tile_row);
3969 :
3970 0 : for (tile_col = 0; tile_col < tile_cols; tile_col++) {
3971 0 : const int tile_idx = tile_row * tile_cols + tile_col;
3972 0 : TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
3973 : #if CONFIG_PVQ || CONFIG_EC_ADAPT
3974 0 : TileDataEnc *this_tile = &cpi->tile_data[tile_idx];
3975 : #endif
3976 0 : const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
3977 0 : const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
3978 0 : const int is_last_col = (tile_col == tile_cols - 1);
3979 0 : const int is_last_tile = is_last_col && is_last_row;
3980 : #if !CONFIG_TILE_GROUPS
3981 : (void)tile_idx;
3982 : #else
3983 :
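     : // Start a new tile group either when a fixed-size group is full (no
     : // MTU limit set) or when the accumulated payload reaches the MTU; the
     : // frame headers are then re-inserted ahead of the tile that overflowed.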
3984 0 : if ((!mtu_size && tile_count > tg_size) ||
3985 0 : (mtu_size && tile_count && curr_tg_data_size >= mtu_size)) {
3986 : // New tile group
3987 0 : tg_count++;
3988 : // We've exceeded the packet size
3989 0 : if (tile_count > 1) {
3990 : /* The last tile exceeded the packet size. The tile group size
3991 : should therefore be tile_count-1.
3992 : Move the last tile and insert headers before it
3993 : */
3994 0 : uint32_t old_total_size = total_size - tile_size - 4;
3995 0 : memmove(dst + old_total_size + hdr_size, dst + old_total_size,
3996 0 : (tile_size + 4) * sizeof(uint8_t));
3997 : // Copy uncompressed header
3998 0 : memmove(dst + old_total_size, dst,
3999 : uncompressed_hdr_size * sizeof(uint8_t));
4000 : // Write the number of tiles in the group into the last uncompressed
4001 : // header before the one we've just inserted
4002 0 : aom_wb_overwrite_literal(&tg_params_wb, tile_idx - tile_count,
4003 : n_log2_tiles);
4004 0 : aom_wb_overwrite_literal(&tg_params_wb, tile_count - 2, n_log2_tiles);
4005 : // Update the pointer to the last TG params
4006 0 : tg_params_wb.bit_offset = saved_offset + 8 * old_total_size;
4007 : // Copy compressed header
4008 0 : memmove(dst + old_total_size + uncompressed_hdr_size,
4009 0 : dst + uncompressed_hdr_size, comp_hdr_size * sizeof(uint8_t));
4010 0 : total_size += hdr_size;
4011 0 : tile_count = 1;
4012 0 : curr_tg_data_size = hdr_size + tile_size + 4;
4013 :
4014 : } else {
4015 : // We exceeded the packet size in just one tile
4016 : // Copy uncompressed header
4017 0 : memmove(dst + total_size, dst,
4018 : uncompressed_hdr_size * sizeof(uint8_t));
4019 : // Write the number of tiles in the group into the last uncompressed
4020 : // header
4021 0 : aom_wb_overwrite_literal(&tg_params_wb, tile_idx - tile_count,
4022 : n_log2_tiles);
4023 0 : aom_wb_overwrite_literal(&tg_params_wb, tile_count - 1, n_log2_tiles);
4024 0 : tg_params_wb.bit_offset = saved_offset + 8 * total_size;
4025 : // Copy compressed header
4026 0 : memmove(dst + total_size + uncompressed_hdr_size,
4027 0 : dst + uncompressed_hdr_size, comp_hdr_size * sizeof(uint8_t));
4028 0 : total_size += hdr_size;
4029 0 : tile_count = 0;
4030 0 : curr_tg_data_size = hdr_size;
4031 : }
4032 : }
4033 0 : tile_count++;
4034 : #endif
4035 0 : av1_tile_set_col(&tile_info, cm, tile_col);
4036 :
4037 : #if CONFIG_DEPENDENT_HORZTILES && CONFIG_TILE_GROUPS
4038 : av1_tile_set_tg_boundary(&tile_info, cm, tile_row, tile_col);
4039 : #endif
4040 0 : buf->data = dst + total_size;
4041 :
4042 : // The last tile does not have a header.
4043 0 : if (!is_last_tile) total_size += 4;
4044 :
4045 : #if CONFIG_EC_ADAPT
4046 : // Initialise tile context from the frame context
4047 0 : this_tile->tctx = *cm->fc;
4048 0 : cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
4049 : #endif
4050 : #if CONFIG_PVQ
4051 : cpi->td.mb.pvq_q = &this_tile->pvq_q;
4052 : cpi->td.mb.daala_enc.state.adapt = &this_tile->tctx.pvq_context;
4053 : #endif // CONFIG_PVQ
4054 : #if CONFIG_ANS
4055 : buf_ans_write_init(buf_ans, dst + total_size);
4056 : write_modes(cpi, &tile_info, buf_ans, &tok, tok_end);
4057 : assert(tok == tok_end);
4058 : aom_buf_ans_flush(buf_ans);
4059 : tile_size = buf_ans_write_end(buf_ans);
4060 : #else
4061 0 : aom_start_encode(&mode_bc, dst + total_size);
4062 0 : write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
4063 : #if !CONFIG_LV_MAP
4064 0 : assert(tok == tok_end);
4065 : #endif // !CONFIG_LV_MAP
4066 0 : aom_stop_encode(&mode_bc);
4067 0 : tile_size = mode_bc.pos;
4068 : #endif // CONFIG_ANS
4069 : #if CONFIG_PVQ
4070 : cpi->td.mb.pvq_q = NULL;
4071 : #endif
4072 :
4073 0 : assert(tile_size > 0);
4074 :
4075 : #if CONFIG_TILE_GROUPS
4076 0 : curr_tg_data_size += tile_size + 4;
4077 : #endif
4078 0 : buf->size = tile_size;
4079 :
4080 0 : if (!is_last_tile) {
4081 0 : *max_tile_size = AOMMAX(*max_tile_size, tile_size);
4082 : // size of this tile
4083 0 : mem_put_le32(buf->data, tile_size);
4084 : }
4085 :
4086 0 : total_size += tile_size;
4087 : }
4088 : }
4089 : #if CONFIG_TILE_GROUPS
4090 : // Write the final tile group size
4091 0 : if (n_log2_tiles) {
4092 0 : aom_wb_overwrite_literal(&tg_params_wb, (1 << n_log2_tiles) - tile_count,
4093 : n_log2_tiles);
4094 0 : aom_wb_overwrite_literal(&tg_params_wb, tile_count - 1, n_log2_tiles);
4095 : }
4096 : // Remux if possible. TODO(Thomas Davies): do this for more than one
4097 : // tile group.
4098 0 : if (have_tiles && tg_count == 1) {
4099 0 : int data_size = total_size - (uncompressed_hdr_size + comp_hdr_size);
4100 0 : data_size = remux_tiles(cm, dst + uncompressed_hdr_size + comp_hdr_size,
4101 : data_size, *max_tile_size, *max_tile_col_size,
4102 : &tile_size_bytes, &tile_col_size_bytes);
4103 0 : total_size = data_size + uncompressed_hdr_size + comp_hdr_size;
4104 0 : aom_wb_overwrite_literal(&tile_size_bytes_wb, tile_size_bytes - 1, 2);
4105 : }
4106 :
4107 : #endif
4108 : #endif // CONFIG_EXT_TILE
4109 0 : return (uint32_t)total_size;
4110 : }
4111 :
4112 0 : static void write_render_size(const AV1_COMMON *cm,
4113 : struct aom_write_bit_buffer *wb) {
4114 0 : const int scaling_active =
4115 0 : cm->width != cm->render_width || cm->height != cm->render_height;
4116 0 : aom_wb_write_bit(wb, scaling_active);
4117 0 : if (scaling_active) {
4118 0 : aom_wb_write_literal(wb, cm->render_width - 1, 16);
4119 0 : aom_wb_write_literal(wb, cm->render_height - 1, 16);
4120 : }
4121 0 : }
4122 :
4123 : #if CONFIG_FRAME_SUPERRES
4124 : static void write_superres_scale(const AV1_COMMON *const cm,
4125 : struct aom_write_bit_buffer *wb) {
4126 : // First bit is whether to scale or not
4127 : if (cm->superres_scale_numerator == SUPERRES_SCALE_DENOMINATOR) {
4128 : aom_wb_write_bit(wb, 0); // no scaling
4129 : } else {
4130 : aom_wb_write_bit(wb, 1); // scaling, write scale factor
4131 : // TODO(afergs): write factor to the compressed header instead
4132 : aom_wb_write_literal(
4133 : wb, cm->superres_scale_numerator - SUPERRES_SCALE_NUMERATOR_MIN,
4134 : SUPERRES_SCALE_BITS);
4135 : }
4136 : }
4137 : #endif // CONFIG_FRAME_SUPERRES
4138 :
4139 0 : static void write_frame_size(const AV1_COMMON *cm,
4140 : struct aom_write_bit_buffer *wb) {
4141 0 : aom_wb_write_literal(wb, cm->width - 1, 16);
4142 0 : aom_wb_write_literal(wb, cm->height - 1, 16);
4143 :
4144 0 : write_render_size(cm, wb);
4145 : #if CONFIG_FRAME_SUPERRES
4146 : write_superres_scale(cm, wb);
4147 : #endif // CONFIG_FRAME_SUPERRES
4148 0 : }
4149 :
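     : // For each reference in order, signal whether the current frame
     : // matches that reference's coded and render size; the first match
     : // ends the loop, and the size is written explicitly only when no
     : // reference matches.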
4150 0 : static void write_frame_size_with_refs(AV1_COMP *cpi,
4151 : struct aom_write_bit_buffer *wb) {
4152 0 : AV1_COMMON *const cm = &cpi->common;
4153 0 : int found = 0;
4154 :
4155 : MV_REFERENCE_FRAME ref_frame;
4156 0 : for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
4157 0 : YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
4158 :
4159 0 : if (cfg != NULL) {
4160 0 : found =
4161 0 : cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height;
4162 0 : found &= cm->render_width == cfg->render_width &&
4163 0 : cm->render_height == cfg->render_height;
4164 : }
4165 0 : aom_wb_write_bit(wb, found);
4166 0 : if (found) {
4167 0 : break;
4168 : }
4169 : }
4170 :
4171 0 : if (!found) {
4172 0 : write_frame_size(cm, wb);
4173 : }
4174 0 : }
4175 :
4176 0 : static void write_sync_code(struct aom_write_bit_buffer *wb) {
4177 0 : aom_wb_write_literal(wb, AV1_SYNC_CODE_0, 8);
4178 0 : aom_wb_write_literal(wb, AV1_SYNC_CODE_1, 8);
4179 0 : aom_wb_write_literal(wb, AV1_SYNC_CODE_2, 8);
4180 0 : }
4181 :
4182 0 : static void write_profile(BITSTREAM_PROFILE profile,
4183 : struct aom_write_bit_buffer *wb) {
4184 0 : switch (profile) {
4185 0 : case PROFILE_0: aom_wb_write_literal(wb, 0, 2); break;
4186 0 : case PROFILE_1: aom_wb_write_literal(wb, 2, 2); break;
4187 0 : case PROFILE_2: aom_wb_write_literal(wb, 1, 2); break;
4188 0 : case PROFILE_3: aom_wb_write_literal(wb, 6, 3); break;
4189 0 : default: assert(0);
4190 : }
4191 0 : }
4192 :
4193 0 : static void write_bitdepth_colorspace_sampling(
4194 : AV1_COMMON *const cm, struct aom_write_bit_buffer *wb) {
4195 0 : if (cm->profile >= PROFILE_2) {
4196 0 : assert(cm->bit_depth > AOM_BITS_8);
4197 0 : aom_wb_write_bit(wb, cm->bit_depth == AOM_BITS_10 ? 0 : 1);
4198 : }
4199 0 : aom_wb_write_literal(wb, cm->color_space, 3);
4200 0 : if (cm->color_space != AOM_CS_SRGB) {
4201 : // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
4202 0 : aom_wb_write_bit(wb, cm->color_range);
4203 0 : if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
4204 0 : assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
4205 0 : aom_wb_write_bit(wb, cm->subsampling_x);
4206 0 : aom_wb_write_bit(wb, cm->subsampling_y);
4207 0 : aom_wb_write_bit(wb, 0); // unused
4208 : } else {
4209 0 : assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
4210 : }
4211 : } else {
4212 0 : assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
4213 0 : aom_wb_write_bit(wb, 0); // unused
4214 : }
4215 0 : }
4216 :
4217 : #if CONFIG_REFERENCE_BUFFER
4218 0 : void write_sequence_header(SequenceHeader *seq_params) {
4219 : /* Placeholder for actually writing to the bitstream */
4220 0 : seq_params->frame_id_numbers_present_flag = FRAME_ID_NUMBERS_PRESENT_FLAG;
4221 0 : seq_params->frame_id_length_minus7 = FRAME_ID_LENGTH_MINUS7;
4222 0 : seq_params->delta_frame_id_length_minus2 = DELTA_FRAME_ID_LENGTH_MINUS2;
4223 0 : }
4224 : #endif
4225 :
4226 : #if CONFIG_EXT_INTER
4227 0 : static void write_compound_tools(const AV1_COMMON *cm,
4228 : struct aom_write_bit_buffer *wb) {
4229 : (void)cm;
4230 : (void)wb;
4231 : #if CONFIG_INTERINTRA
4232 0 : if (!frame_is_intra_only(cm) && cm->reference_mode != COMPOUND_REFERENCE) {
4233 0 : aom_wb_write_bit(wb, cm->allow_interintra_compound);
4234 : } else {
4235 0 : assert(cm->allow_interintra_compound == 0);
4236 : }
4237 : #endif // CONFIG_INTERINTRA
4238 : #if CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
4239 0 : if (!frame_is_intra_only(cm) && cm->reference_mode != SINGLE_REFERENCE) {
4240 0 : aom_wb_write_bit(wb, cm->allow_masked_compound);
4241 : } else {
4242 0 : assert(cm->allow_masked_compound == 0);
4243 : }
4244 : #endif // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
4245 0 : }
4246 : #endif // CONFIG_EXT_INTER
4247 :
4248 0 : static void write_uncompressed_header(AV1_COMP *cpi,
4249 : struct aom_write_bit_buffer *wb) {
4250 0 : AV1_COMMON *const cm = &cpi->common;
4251 0 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
4252 :
4253 : #if CONFIG_REFERENCE_BUFFER
4254 : /* TODO: Move outside frame loop or inside key-frame branch */
4255 0 : write_sequence_header(&cpi->seq_params);
4256 : #endif
4257 :
4258 0 : aom_wb_write_literal(wb, AOM_FRAME_MARKER, 2);
4259 :
4260 0 : write_profile(cm->profile, wb);
4261 :
4262 : #if CONFIG_EXT_REFS
4263 : // NOTE: By default, all coded frames are to be used as a reference
4264 0 : cm->is_reference_frame = 1;
4265 :
4266 0 : if (cm->show_existing_frame) {
4267 0 : RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
4268 0 : const int frame_to_show = cm->ref_frame_map[cpi->existing_fb_idx_to_show];
4269 :
4270 0 : if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
4271 0 : aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4272 : "Buffer %d does not contain a reconstructed frame",
4273 : frame_to_show);
4274 : }
4275 0 : ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
4276 :
4277 0 : aom_wb_write_bit(wb, 1); // show_existing_frame
4278 0 : aom_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
4279 :
4280 : #if CONFIG_REFERENCE_BUFFER
4281 0 : if (cpi->seq_params.frame_id_numbers_present_flag) {
4282 0 : int frame_id_len = cpi->seq_params.frame_id_length_minus7 + 7;
4283 0 : int display_frame_id = cm->ref_frame_id[cpi->existing_fb_idx_to_show];
4284 0 : aom_wb_write_literal(wb, display_frame_id, frame_id_len);
4285 : /* Add a zero byte to prevent emulation of superframe marker */
4286 : /* Same logic as when terminating the entropy coder */
4287 : /* Consider having this logic in only one place */
4288 0 : aom_wb_write_literal(wb, 0, 8);
4289 : }
4290 : #endif
4291 :
4292 0 : return;
4293 : } else {
4294 : #endif // CONFIG_EXT_REFS
4295 0 : aom_wb_write_bit(wb, 0); // show_existing_frame
4296 : #if CONFIG_EXT_REFS
4297 : }
4298 : #endif // CONFIG_EXT_REFS
4299 :
4300 0 : aom_wb_write_bit(wb, cm->frame_type);
4301 0 : aom_wb_write_bit(wb, cm->show_frame);
4302 0 : aom_wb_write_bit(wb, cm->error_resilient_mode);
4303 :
4304 : #if CONFIG_REFERENCE_BUFFER
4305 0 : cm->invalid_delta_frame_id_minus1 = 0;
4306 0 : if (cpi->seq_params.frame_id_numbers_present_flag) {
4307 0 : int frame_id_len = cpi->seq_params.frame_id_length_minus7 + 7;
4308 0 : aom_wb_write_literal(wb, cm->current_frame_id, frame_id_len);
4309 : }
4310 : #endif
4311 :
4312 : #if CONFIG_FRAME_SUPERRES
4313 : // TODO(afergs): Remove - this is just to stop superres from breaking
4314 : cm->superres_scale_numerator = SUPERRES_SCALE_DENOMINATOR;
4315 : #endif // CONFIG_FRAME_SUPERRES
4316 :
4317 0 : if (cm->frame_type == KEY_FRAME) {
4318 0 : write_sync_code(wb);
4319 0 : write_bitdepth_colorspace_sampling(cm, wb);
4320 0 : write_frame_size(cm, wb);
4321 : #if CONFIG_ANS && ANS_MAX_SYMBOLS
4322 : assert(cpi->common.ans_window_size_log2 >= 8);
4323 : assert(cpi->common.ans_window_size_log2 < 24);
4324 : aom_wb_write_literal(wb, cpi->common.ans_window_size_log2 - 8, 4);
4325 : #endif // CONFIG_ANS && ANS_MAX_SYMBOLS
4326 : #if CONFIG_PALETTE || CONFIG_INTRABC
4327 0 : aom_wb_write_bit(wb, cm->allow_screen_content_tools);
4328 : #endif // CONFIG_PALETTE || CONFIG_INTRABC
4329 : } else {
4330 0 : if (!cm->show_frame) aom_wb_write_bit(wb, cm->intra_only);
4331 : #if CONFIG_PALETTE || CONFIG_INTRABC
4332 0 : if (cm->intra_only) aom_wb_write_bit(wb, cm->allow_screen_content_tools);
4333 : #endif // CONFIG_PALETTE || CONFIG_INTRABC
4334 0 : if (!cm->error_resilient_mode) {
4335 0 : if (cm->intra_only) {
4336 0 : aom_wb_write_bit(wb,
4337 0 : cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
4338 : } else {
4339 0 : aom_wb_write_bit(wb,
4340 0 : cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
4341 0 : if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
4342 0 : aom_wb_write_bit(wb,
4343 0 : cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
4344 : }
4345 : }
4346 :
4347 : #if CONFIG_EXT_REFS
4348 0 : cpi->refresh_frame_mask = get_refresh_mask(cpi);
4349 : #endif // CONFIG_EXT_REFS
4350 :
4351 0 : if (cm->intra_only) {
4352 0 : write_sync_code(wb);
4353 0 : write_bitdepth_colorspace_sampling(cm, wb);
4354 :
4355 : #if CONFIG_EXT_REFS
4356 0 : aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
4357 : #else
4358 : aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
4359 : #endif // CONFIG_EXT_REFS
4360 0 : write_frame_size(cm, wb);
4361 :
4362 : #if CONFIG_ANS && ANS_MAX_SYMBOLS
4363 : assert(cpi->common.ans_window_size_log2 >= 8);
4364 : assert(cpi->common.ans_window_size_log2 < 24);
4365 : aom_wb_write_literal(wb, cpi->common.ans_window_size_log2 - 8, 4);
4366 : #endif // CONFIG_ANS && ANS_MAX_SYMBOLS
4367 : } else {
4368 : MV_REFERENCE_FRAME ref_frame;
4369 :
4370 : #if CONFIG_EXT_REFS
4371 0 : aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
4372 : #else
4373 : aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
4374 : #endif // CONFIG_EXT_REFS
4375 :
4376 : #if CONFIG_EXT_REFS
4377 0 : if (!cpi->refresh_frame_mask) {
4378 : // NOTE: "cpi->refresh_frame_mask == 0" indicates that the coded frame
4379 : // will not be used as a reference
4380 0 : cm->is_reference_frame = 0;
4381 : }
4382 : #endif // CONFIG_EXT_REFS
4383 :
4384 0 : for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
4385 0 : assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
4386 0 : aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
4387 : REF_FRAMES_LOG2);
4388 0 : aom_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
4389 : #if CONFIG_REFERENCE_BUFFER
4390 0 : if (cpi->seq_params.frame_id_numbers_present_flag) {
4391 0 : int i = get_ref_frame_map_idx(cpi, ref_frame);
4392 0 : int frame_id_len = cpi->seq_params.frame_id_length_minus7 + 7;
4393 0 : int diff_len = cpi->seq_params.delta_frame_id_length_minus2 + 2;
4394 0 : int delta_frame_id_minus1 =
4395 0 : ((cm->current_frame_id - cm->ref_frame_id[i] +
4396 0 : (1 << frame_id_len)) %
4397 0 : (1 << frame_id_len)) -
4398 : 1;
4399 0 : if (delta_frame_id_minus1 < 0 ||
4400 0 : delta_frame_id_minus1 >= (1 << diff_len))
4401 0 : cm->invalid_delta_frame_id_minus1 = 1;
4402 0 : aom_wb_write_literal(wb, delta_frame_id_minus1, diff_len);
4403 : }
4404 : #endif
4405 : }
4406 :
4407 : #if CONFIG_FRAME_SIZE
4408 : if (cm->error_resilient_mode == 0) {
4409 : write_frame_size_with_refs(cpi, wb);
4410 : } else {
4411 : write_frame_size(cm, wb);
4412 : }
4413 : #else
4414 0 : write_frame_size_with_refs(cpi, wb);
4415 : #endif
4416 :
4417 0 : aom_wb_write_bit(wb, cm->allow_high_precision_mv);
4418 :
4419 0 : fix_interp_filter(cm, cpi->td.counts);
4420 0 : write_frame_interp_filter(cm->interp_filter, wb);
4421 : #if CONFIG_TEMPMV_SIGNALING
4422 0 : if (!cm->error_resilient_mode) {
4423 0 : aom_wb_write_bit(wb, cm->use_prev_frame_mvs);
4424 : }
4425 : #endif
4426 : }
4427 : }
4428 :
4429 : #if CONFIG_REFERENCE_BUFFER
4430 0 : cm->refresh_mask = cm->frame_type == KEY_FRAME ? 0xFF : get_refresh_mask(cpi);
4431 : #endif
4432 :
4433 0 : if (!cm->error_resilient_mode) {
4434 0 : aom_wb_write_bit(
4435 0 : wb, cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
4436 : }
4437 :
4438 0 : aom_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
4439 :
4440 0 : assert(cm->mib_size == mi_size_wide[cm->sb_size]);
4441 0 : assert(cm->mib_size == 1 << cm->mib_size_log2);
4442 : #if CONFIG_EXT_PARTITION
4443 : assert(cm->sb_size == BLOCK_128X128 || cm->sb_size == BLOCK_64X64);
4444 : aom_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
4445 : #else
4446 0 : assert(cm->sb_size == BLOCK_64X64);
4447 : #endif // CONFIG_EXT_PARTITION
4448 :
4449 0 : encode_loopfilter(cm, wb);
4450 : #if CONFIG_CDEF
4451 0 : encode_cdef(cm, wb);
4452 : #endif
4453 : #if CONFIG_LOOP_RESTORATION
4454 : encode_restoration_mode(cm, wb);
4455 : #endif // CONFIG_LOOP_RESTORATION
4456 0 : encode_quantization(cm, wb);
4457 0 : encode_segmentation(cm, xd, wb);
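     : // Delta-q signaling is only allowed when no segment-level quantizer
     : // is active and the base q-index is nonzero; delta-lf
     : // (CONFIG_EXT_DELTA_Q) piggybacks on the same condition.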
4458 : #if CONFIG_DELTA_Q
4459 : {
4460 : int i;
4461 0 : struct segmentation *const seg = &cm->seg;
4462 0 : int segment_quantizer_active = 0;
4463 0 : for (i = 0; i < MAX_SEGMENTS; i++) {
4464 0 : if (segfeature_active(seg, i, SEG_LVL_ALT_Q)) {
4465 0 : segment_quantizer_active = 1;
4466 : }
4467 : }
4468 :
4469 0 : if (cm->delta_q_present_flag)
4470 0 : assert(segment_quantizer_active == 0 && cm->base_qindex > 0);
4471 0 : if (segment_quantizer_active == 0 && cm->base_qindex > 0) {
4472 0 : aom_wb_write_bit(wb, cm->delta_q_present_flag);
4473 0 : if (cm->delta_q_present_flag) {
4474 0 : aom_wb_write_literal(wb, OD_ILOG_NZ(cm->delta_q_res) - 1, 2);
4475 0 : xd->prev_qindex = cm->base_qindex;
4476 : #if CONFIG_EXT_DELTA_Q
4477 0 : assert(seg->abs_delta == SEGMENT_DELTADATA);
4478 0 : aom_wb_write_bit(wb, cm->delta_lf_present_flag);
4479 0 : if (cm->delta_lf_present_flag) {
4480 0 : aom_wb_write_literal(wb, OD_ILOG_NZ(cm->delta_lf_res) - 1, 2);
4481 0 : xd->prev_delta_lf_from_base = 0;
4482 : }
4483 : #endif // CONFIG_EXT_DELTA_Q
4484 : }
4485 : }
4486 : }
4487 : #endif
4488 :
4489 0 : write_tx_mode(cm, xd, &cm->tx_mode, wb);
4490 :
4491 0 : if (cpi->allow_comp_inter_inter) {
4492 0 : const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
4493 : #if !CONFIG_REF_ADAPT
4494 0 : const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
4495 : #endif // !CONFIG_REF_ADAPT
4496 :
4497 0 : aom_wb_write_bit(wb, use_hybrid_pred);
4498 : #if !CONFIG_REF_ADAPT
4499 0 : if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
4500 : #endif // !CONFIG_REF_ADAPT
4501 : }
4502 : #if CONFIG_EXT_INTER
4503 0 : write_compound_tools(cm, wb);
4504 : #endif // CONFIG_EXT_INTER
4505 :
4506 : #if CONFIG_EXT_TX
4507 0 : aom_wb_write_bit(wb, cm->reduced_tx_set_used);
4508 : #endif // CONFIG_EXT_TX
4509 :
4510 0 : write_tile_info(cm, wb);
4511 : }
4512 :
4513 : #if CONFIG_GLOBAL_MOTION
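     : // The cases below intentionally fall through, so a more complex model
     : // also emits the parameters of the simpler ones; every parameter is a
     : // subexponential-coded difference from the previous frame's model.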
4514 0 : static void write_global_motion_params(WarpedMotionParams *params,
4515 : WarpedMotionParams *ref_params,
4516 : aom_prob *probs, aom_writer *w,
4517 : int allow_hp) {
4518 0 : TransformationType type = params->wmtype;
4519 : int trans_bits;
4520 : int trans_prec_diff;
4521 0 : av1_write_token(w, av1_global_motion_types_tree, probs,
4522 0 : &global_motion_types_encodings[type]);
4523 0 : switch (type) {
4524 : case HOMOGRAPHY:
4525 : case HORTRAPEZOID:
4526 : case VERTRAPEZOID:
4527 0 : if (type != HORTRAPEZOID)
4528 0 : aom_write_signed_primitive_refsubexpfin(
4529 : w, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
4530 0 : (ref_params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF),
4531 0 : (params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF));
4532 0 : if (type != VERTRAPEZOID)
4533 0 : aom_write_signed_primitive_refsubexpfin(
4534 : w, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
4535 0 : (ref_params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF),
4536 0 : (params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF));
4537 : // fallthrough intended
4538 : case AFFINE:
4539 : case ROTZOOM:
4540 0 : aom_write_signed_primitive_refsubexpfin(
4541 : w, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4542 0 : (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) -
4543 : (1 << GM_ALPHA_PREC_BITS),
4544 0 : (params->wmmat[2] >> GM_ALPHA_PREC_DIFF) - (1 << GM_ALPHA_PREC_BITS));
4545 0 : if (type != VERTRAPEZOID)
4546 0 : aom_write_signed_primitive_refsubexpfin(
4547 : w, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4548 0 : (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF),
4549 0 : (params->wmmat[3] >> GM_ALPHA_PREC_DIFF));
4550 0 : if (type >= AFFINE) {
4551 0 : if (type != HORTRAPEZOID)
4552 0 : aom_write_signed_primitive_refsubexpfin(
4553 : w, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4554 0 : (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF),
4555 0 : (params->wmmat[4] >> GM_ALPHA_PREC_DIFF));
4556 0 : aom_write_signed_primitive_refsubexpfin(
4557 : w, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4558 0 : (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
4559 : (1 << GM_ALPHA_PREC_BITS),
4560 0 : (params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
4561 : (1 << GM_ALPHA_PREC_BITS));
4562 : }
4563 : // fallthrough intended
4564 : case TRANSLATION:
4565 0 : trans_bits = (type == TRANSLATION) ? GM_ABS_TRANS_ONLY_BITS - !allow_hp
4566 0 : : GM_ABS_TRANS_BITS;
4567 0 : trans_prec_diff = (type == TRANSLATION)
4568 0 : ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp
4569 0 : : GM_TRANS_PREC_DIFF;
4570 0 : aom_write_signed_primitive_refsubexpfin(
4571 0 : w, (1 << trans_bits) + 1, SUBEXPFIN_K,
4572 0 : (ref_params->wmmat[0] >> trans_prec_diff),
4573 0 : (params->wmmat[0] >> trans_prec_diff));
4574 0 : aom_write_signed_primitive_refsubexpfin(
4575 0 : w, (1 << trans_bits) + 1, SUBEXPFIN_K,
4576 0 : (ref_params->wmmat[1] >> trans_prec_diff),
4577 0 : (params->wmmat[1] >> trans_prec_diff));
4578 0 : break;
4579 0 : case IDENTITY: break;
4580 0 : default: assert(0);
4581 : }
4582 0 : }
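     : // Editor's note: a sketch of the model layout the fallthrough cascade
     : // above appears to assume (a hedged reading of the indices it codes):
     : //   wmmat[0..1] translation              - every non-IDENTITY model
     : //   wmmat[2..3] rotation/zoom terms      - ROTZOOM and richer models
     : //   wmmat[4..5] remaining affine terms   - AFFINE and richer models
     : //   wmmat[6..7] row-3 (perspective) terms - trapezoid/homography only
     : // Each parameter is coded as a signed subexponential delta against the
     : // same parameter of the reference model, so an unchanged global-motion
     : // model is nearly free to signal.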
4583 :
4584 0 : static void write_global_motion(AV1_COMP *cpi, aom_writer *w) {
4585 0 : AV1_COMMON *const cm = &cpi->common;
4586 : int frame;
4587 0 : for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
4588 0 : write_global_motion_params(
4589 0 : &cm->global_motion[frame], &cm->prev_frame->global_motion[frame],
4590 0 : cm->fc->global_motion_types_prob, w, cm->allow_high_precision_mv);
4591 : /*
4592 : printf("Frame %d/%d: Enc Ref %d (used %d): %d %d %d %d\n",
4593 : cm->current_video_frame, cm->show_frame, frame,
4594 : cpi->global_motion_used[frame], cm->global_motion[frame].wmmat[0],
4595 : cm->global_motion[frame].wmmat[1], cm->global_motion[frame].wmmat[2],
4596 : cm->global_motion[frame].wmmat[3]);
4597 : */
4598 : }
4599 0 : }
4600 : #endif
4601 :
4602 0 : static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
4603 0 : AV1_COMMON *const cm = &cpi->common;
4604 : #if CONFIG_SUPERTX
4605 : MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
4606 : #endif // CONFIG_SUPERTX
4607 0 : FRAME_CONTEXT *const fc = cm->fc;
4608 0 : FRAME_COUNTS *counts = cpi->td.counts;
4609 : aom_writer *header_bc;
4610 : int i, j;
4611 :
4612 : #if CONFIG_TILE_GROUPS
4613 0 : const int probwt = cm->num_tg;
4614 : #else
4615 : const int probwt = 1;
4616 : #endif
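     :   // Editor's note (assumption): with tile groups the compressed header
     :   // is repeated once per tile group, so weighting each probability
     :   // update by num_tg appears to make the "is this update worth its
     :   // bits" decision account for that replication.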
4617 :
4618 : #if CONFIG_ANS
4619 : int header_size;
4620 : header_bc = &cpi->buf_ans;
4621 : buf_ans_write_init(header_bc, data);
4622 : #else
4623 : aom_writer real_header_bc;
4624 0 : header_bc = &real_header_bc;
4625 0 : aom_start_encode(header_bc, data);
4626 : #endif
4627 :
4628 : #if CONFIG_LOOP_RESTORATION
4629 : encode_restoration(cm, header_bc);
4630 : #endif // CONFIG_LOOP_RESTORATION
4631 : #if !CONFIG_EC_ADAPT
4632 : update_txfm_probs(cm, header_bc, counts);
4633 : #endif
4634 : #if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
4635 : if (cm->tx_mode == TX_MODE_SELECT)
4636 : av1_cond_prob_diff_update(header_bc, &cm->fc->quarter_tx_size_prob,
4637 : cm->counts.quarter_tx_size, probwt);
4638 : #endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
4639 : #if CONFIG_LV_MAP
4640 : av1_write_txb_probs(cpi, header_bc);
4641 : #else
4642 : #if !CONFIG_PVQ
4643 : #if !CONFIG_EC_ADAPT
4644 : update_coef_probs(cpi, header_bc);
4645 : #endif // !CONFIG_EC_ADAPT
4646 : #endif // CONFIG_PVQ
4647 : #endif // CONFIG_LV_MAP
4648 :
4649 : #if CONFIG_VAR_TX
4650 0 : update_txfm_partition_probs(cm, header_bc, counts, probwt);
4651 : #endif
4652 :
4653 0 : update_skip_probs(cm, header_bc, counts);
4654 : #if !CONFIG_EC_ADAPT && CONFIG_DELTA_Q
4655 : update_delta_q_probs(cm, header_bc, counts);
4656 : #if CONFIG_EXT_DELTA_Q
4657 : update_delta_lf_probs(cm, header_bc, counts);
4658 : #endif
4659 : #endif
4660 : #if !CONFIG_EC_ADAPT
4661 : update_seg_probs(cpi, header_bc);
4662 :
4663 : for (i = 0; i < INTRA_MODES; ++i) {
4664 : prob_diff_update(av1_intra_mode_tree, fc->uv_mode_prob[i],
4665 : counts->uv_mode[i], INTRA_MODES, probwt, header_bc);
4666 : }
4667 :
4668 : #if CONFIG_EXT_PARTITION_TYPES
4669 : for (i = 0; i < PARTITION_PLOFFSET; ++i)
4670 : prob_diff_update(av1_partition_tree, fc->partition_prob[i],
4671 : counts->partition[i], PARTITION_TYPES, probwt, header_bc);
4672 : for (; i < PARTITION_CONTEXTS_PRIMARY; ++i)
4673 : prob_diff_update(av1_ext_partition_tree, fc->partition_prob[i],
4674 : counts->partition[i], EXT_PARTITION_TYPES, probwt,
4675 : header_bc);
4676 : #else
4677 : for (i = 0; i < PARTITION_CONTEXTS_PRIMARY; ++i)
4678 : prob_diff_update(av1_partition_tree, fc->partition_prob[i],
4679 : counts->partition[i], PARTITION_TYPES, probwt, header_bc);
4680 : #endif // CONFIG_EXT_PARTITION_TYPES
4681 : #if CONFIG_UNPOISON_PARTITION_CTX
4682 : for (; i < PARTITION_CONTEXTS_PRIMARY + PARTITION_BLOCK_SIZES; ++i) {
4683 : unsigned int ct[2] = { counts->partition[i][PARTITION_VERT],
4684 : counts->partition[i][PARTITION_SPLIT] };
4685 : assert(counts->partition[i][PARTITION_NONE] == 0);
4686 : assert(counts->partition[i][PARTITION_HORZ] == 0);
4687 : assert(fc->partition_prob[i][PARTITION_NONE] == 0);
4688 : assert(fc->partition_prob[i][PARTITION_HORZ] == 0);
4689 : av1_cond_prob_diff_update(header_bc, &fc->partition_prob[i][PARTITION_VERT],
4690 : ct, probwt);
4691 : }
4692 : for (; i < PARTITION_CONTEXTS_PRIMARY + 2 * PARTITION_BLOCK_SIZES; ++i) {
4693 : unsigned int ct[2] = { counts->partition[i][PARTITION_HORZ],
4694 : counts->partition[i][PARTITION_SPLIT] };
4695 : assert(counts->partition[i][PARTITION_NONE] == 0);
4696 : assert(counts->partition[i][PARTITION_VERT] == 0);
4697 : assert(fc->partition_prob[i][PARTITION_NONE] == 0);
4698 : assert(fc->partition_prob[i][PARTITION_VERT] == 0);
4699 : av1_cond_prob_diff_update(header_bc, &fc->partition_prob[i][PARTITION_HORZ],
4700 : ct, probwt);
4701 : }
4702 : #endif
4703 : #if CONFIG_EXT_INTRA && CONFIG_INTRA_INTERP
4704 : for (i = 0; i < INTRA_FILTERS + 1; ++i)
4705 : prob_diff_update(av1_intra_filter_tree, fc->intra_filter_probs[i],
4706 : counts->intra_filter[i], INTRA_FILTERS, probwt, header_bc);
4707 : #endif // CONFIG_EXT_INTRA && CONFIG_INTRA_INTERP
4708 : #endif // !CONFIG_EC_ADAPT
4709 :
4710 0 : if (frame_is_intra_only(cm)) {
4711 0 : av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
4712 0 : av1_copy(cm->fc->kf_y_cdf, av1_kf_y_mode_cdf);
4713 :
4714 : #if !CONFIG_EC_ADAPT
4715 : for (i = 0; i < INTRA_MODES; ++i)
4716 : for (j = 0; j < INTRA_MODES; ++j)
4717 : prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
4718 : counts->kf_y_mode[i][j], INTRA_MODES, probwt,
4719 : header_bc);
4720 : #endif // CONFIG_EC_ADAPT
4721 : #if CONFIG_INTRABC
4722 : if (cm->allow_screen_content_tools) {
4723 : av1_cond_prob_diff_update(header_bc, &fc->intrabc_prob,
4724 : cm->counts.intrabc, probwt);
4725 : }
4726 : #endif
4727 : } else {
4728 0 : update_inter_mode_probs(cm, header_bc, counts);
4729 : #if CONFIG_EXT_INTER
4730 0 : update_inter_compound_mode_probs(cm, probwt, header_bc);
4731 : #if CONFIG_INTERINTRA
4732 0 : if (cm->reference_mode != COMPOUND_REFERENCE &&
4733 0 : cm->allow_interintra_compound) {
4734 0 : for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
4735 0 : if (is_interintra_allowed_bsize_group(i)) {
4736 0 : av1_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
4737 0 : cm->counts.interintra[i], probwt);
4738 : }
4739 : }
4740 0 : for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
4741 0 : prob_diff_update(
4742 0 : av1_interintra_mode_tree, cm->fc->interintra_mode_prob[i],
4743 0 : counts->interintra_mode[i], INTERINTRA_MODES, probwt, header_bc);
4744 : }
4745 : #if CONFIG_WEDGE
4746 0 : for (i = 0; i < BLOCK_SIZES; i++) {
4747 0 : if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
4748 0 : av1_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
4749 0 : cm->counts.wedge_interintra[i], probwt);
4750 : }
4751 : #endif // CONFIG_WEDGE
4752 : }
4753 : #endif // CONFIG_INTERINTRA
4754 : #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
4755 0 : if (cm->reference_mode != SINGLE_REFERENCE && cm->allow_masked_compound) {
4756 0 : for (i = 0; i < BLOCK_SIZES; i++)
4757 0 : prob_diff_update(av1_compound_type_tree, fc->compound_type_prob[i],
4758 0 : cm->counts.compound_interinter[i], COMPOUND_TYPES,
4759 : probwt, header_bc);
4760 : }
4761 : #endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
4762 : #endif // CONFIG_EXT_INTER
4763 :
4764 : #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
4765 0 : for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
4766 0 : prob_diff_update(av1_motion_mode_tree, fc->motion_mode_prob[i],
4767 0 : counts->motion_mode[i], MOTION_MODES, probwt, header_bc);
4768 : #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
4769 : #if !CONFIG_EC_ADAPT
4770 : if (cm->interp_filter == SWITCHABLE)
4771 : update_switchable_interp_probs(cm, header_bc, counts);
4772 : #endif
4773 :
4774 0 : for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
4775 0 : av1_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
4776 0 : counts->intra_inter[i], probwt);
4777 :
4778 0 : if (cpi->allow_comp_inter_inter) {
4779 0 : const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
4780 0 : if (use_hybrid_pred)
4781 0 : for (i = 0; i < COMP_INTER_CONTEXTS; i++)
4782 0 : av1_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
4783 0 : counts->comp_inter[i], probwt);
4784 : }
4785 :
4786 0 : if (cm->reference_mode != COMPOUND_REFERENCE) {
4787 0 : for (i = 0; i < REF_CONTEXTS; i++) {
4788 0 : for (j = 0; j < (SINGLE_REFS - 1); j++) {
4789 0 : av1_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
4790 0 : counts->single_ref[i][j], probwt);
4791 : }
4792 : }
4793 : }
4794 0 : if (cm->reference_mode != SINGLE_REFERENCE) {
4795 0 : for (i = 0; i < REF_CONTEXTS; i++) {
4796 : #if CONFIG_EXT_REFS
4797 0 : for (j = 0; j < (FWD_REFS - 1); j++) {
4798 0 : av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
4799 0 : counts->comp_ref[i][j], probwt);
4800 : }
4801 0 : for (j = 0; j < (BWD_REFS - 1); j++) {
4802 0 : av1_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
4803 0 : counts->comp_bwdref[i][j], probwt);
4804 : }
4805 : #else
4806 : for (j = 0; j < (COMP_REFS - 1); j++) {
4807 : av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
4808 : counts->comp_ref[i][j], probwt);
4809 : }
4810 : #endif // CONFIG_EXT_REFS
4811 : }
4812 : }
4813 :
4814 : #if !CONFIG_EC_ADAPT
4815 : for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
4816 : prob_diff_update(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
4817 : counts->y_mode[i], INTRA_MODES, probwt, header_bc);
4818 : }
4819 : #endif
4820 :
4821 0 : av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc, counts->mv);
4822 : #if !CONFIG_EC_ADAPT
4823 : update_ext_tx_probs(cm, header_bc);
4824 : #endif
4825 : #if CONFIG_SUPERTX
4826 : if (!xd->lossless[0]) update_supertx_probs(cm, probwt, header_bc);
4827 : #endif // CONFIG_SUPERTX
4828 : #if CONFIG_GLOBAL_MOTION
4829 0 : write_global_motion(cpi, header_bc);
4830 : #endif // CONFIG_GLOBAL_MOTION
4831 : }
4832 : #if !CONFIG_EC_ADAPT
4833 : av1_coef_head_cdfs(fc);
4834 : av1_coef_pareto_cdfs(fc);
4835 : for (i = 0; i < NMV_CONTEXTS; ++i) av1_set_mv_cdfs(&fc->nmvc[i]);
4836 : av1_set_mode_cdfs(cm);
4837 : #endif // !CONFIG_EC_ADAPT
4838 : #if CONFIG_ANS
4839 : aom_buf_ans_flush(header_bc);
4840 : header_size = buf_ans_write_end(header_bc);
4841 : assert(header_size <= 0xffff);
4842 : return header_size;
4843 : #else
4844 0 : aom_stop_encode(header_bc);
4845 0 : assert(header_bc->pos <= 0xffff);
4846 0 : return header_bc->pos;
4847 : #endif // CONFIG_ANS
4848 : }
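     : // Editor's note: both paths above assert that the returned size fits
     : // in 16 bits because the uncompressed header reserves exactly a 16-bit
     : // field for the compressed-header size (see the placeholder written in
     : // av1_pack_bitstream() below).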
4849 :
4850 0 : static int choose_size_bytes(uint32_t size, int spare_msbs) {
4851 : // Choose the number of bytes required to represent size, without
4852 : // using the 'spare_msbs' number of most significant bits.
4853 :
4854 : // Make sure we will fit in 4 bytes to start with.
4855 0 : if (spare_msbs > 0 && size >> (32 - spare_msbs) != 0) return -1;
4856 :
4857 : // Normalise to 32 bits
4858 0 : size <<= spare_msbs;
4859 :
4860 0 : if (size >> 24 != 0)
4861 0 : return 4;
4862 0 : else if (size >> 16 != 0)
4863 0 : return 3;
4864 0 : else if (size >> 8 != 0)
4865 0 : return 2;
4866 : else
4867 0 : return 1;
4868 : }
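     : // Editor's note: two worked examples of choose_size_bytes():
     : //   choose_size_bytes(0x1234, 0): 0x1234 >> 24 == 0, >> 16 == 0,
     : //   >> 8 == 0x12 != 0, so 2 bytes are needed.
     : //   choose_size_bytes(0x80000000, 1): the spare top bit is set, so
     : //   the size cannot be represented and -1 is returned.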
4869 :
4870 0 : static void mem_put_varsize(uint8_t *const dst, const int sz, const int val) {
4871 0 : switch (sz) {
4872 0 : case 1: dst[0] = (uint8_t)(val & 0xff); break;
4873 0 : case 2: mem_put_le16(dst, val); break;
4874 0 : case 3: mem_put_le24(dst, val); break;
4875 0 : case 4: mem_put_le32(dst, val); break;
4876 0 : default: assert(0 && "Invalid size"); break;
4877 : }
4878 0 : }
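     : // Editor's note: mem_put_varsize() emits little-endian bytes, e.g.
     : // mem_put_varsize(dst, 3, 0x012345) stores 0x45, 0x23, 0x01.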
4879 0 : static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
4880 : const uint32_t data_size, const uint32_t max_tile_size,
4881 : const uint32_t max_tile_col_size,
4882 : int *const tile_size_bytes,
4883 : int *const tile_col_size_bytes) {
4884 : // Choose the tile size bytes (tsb) and tile column size bytes (tcsb)
4885 : #if CONFIG_EXT_TILE
4886 : // The top bit in the tile size field indicates tile copy mode, so we
4887 : // have one fewer bit in which to code the tile size
4888 : const int tsb = choose_size_bytes(max_tile_size, 1);
4889 : const int tcsb = choose_size_bytes(max_tile_col_size, 0);
4890 : #else
4891 0 : const int tsb = choose_size_bytes(max_tile_size, 0);
4892 0 : const int tcsb = 4; // This is ignored
4893 : (void)max_tile_col_size;
4894 : #endif // CONFIG_EXT_TILE
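     :   // Editor's note: write_tiles() appears to emit every tile (and tile
     :   // column) size as a full 4-byte little-endian field; the code below
     :   // rewrites those fields with tsb/tcsb bytes and slides the payloads
     :   // down in place. Since tsb, tcsb <= 4, the read cursor never falls
     :   // behind the write cursor, which makes the in-place memmove() safe.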
4895 :
4896 0 : assert(tsb > 0);
4897 0 : assert(tcsb > 0);
4898 :
4899 0 : *tile_size_bytes = tsb;
4900 0 : *tile_col_size_bytes = tcsb;
4901 :
4902 0 : if (tsb == 4 && tcsb == 4) {
4903 0 : return data_size;
4904 : } else {
4905 0 : uint32_t wpos = 0;
4906 0 : uint32_t rpos = 0;
4907 :
4908 : #if CONFIG_EXT_TILE
4909 : int tile_row;
4910 : int tile_col;
4911 :
4912 : for (tile_col = 0; tile_col < cm->tile_cols; tile_col++) {
4913 : // Every tile column except the last has a column size header
4914 : if (tile_col < cm->tile_cols - 1) {
4915 : uint32_t tile_col_size = mem_get_le32(dst + rpos);
4916 : rpos += 4;
4917 :
4918 : // Adjust the tile column size by the number of bytes removed
4919 : // from the tile size fields.
4920 : tile_col_size -= (4 - tsb) * cm->tile_rows;
4921 :
4922 : mem_put_varsize(dst + wpos, tcsb, tile_col_size);
4923 : wpos += tcsb;
4924 : }
4925 :
4926 : for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
4927 : // Every tile row, including the last, has a size header
4928 : uint32_t tile_header = mem_get_le32(dst + rpos);
4929 : rpos += 4;
4930 :
4931 : // If this is a copy tile, shift the MSB into the top bit of the
4932 : // new, narrower size field; a copy tile carries no data of its own.
4933 : if (tile_header >> 31 != 0) {
4934 : if (tsb < 4) tile_header >>= 32 - 8 * tsb;
4935 : mem_put_varsize(dst + wpos, tsb, tile_header);
4936 : wpos += tsb;
4937 : } else {
4938 : mem_put_varsize(dst + wpos, tsb, tile_header);
4939 : wpos += tsb;
4940 :
4941 : memmove(dst + wpos, dst + rpos, tile_header);
4942 : rpos += tile_header;
4943 : wpos += tile_header;
4944 : }
4945 : }
4946 : }
4947 : #else
4948 0 : const int n_tiles = cm->tile_cols * cm->tile_rows;
4949 : int n;
4950 :
4951 0 : for (n = 0; n < n_tiles; n++) {
4952 : int tile_size;
4953 :
4954 0 : if (n == n_tiles - 1) {
4955 0 : tile_size = data_size - rpos;
4956 : } else {
4957 0 : tile_size = mem_get_le32(dst + rpos);
4958 0 : rpos += 4;
4959 0 : mem_put_varsize(dst + wpos, tsb, tile_size);
4960 0 : wpos += tsb;
4961 : }
4962 :
4963 0 : memmove(dst + wpos, dst + rpos, tile_size);
4964 :
4965 0 : rpos += tile_size;
4966 0 : wpos += tile_size;
4967 : }
4968 : #endif // CONFIG_EXT_TILE
4969 :
4970 0 : assert(rpos > wpos);
4971 0 : assert(rpos == data_size);
4972 :
4973 0 : return wpos;
4974 : }
4975 : }
4976 :
4977 0 : void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size) {
4978 0 : uint8_t *data = dst;
4979 : #if !CONFIG_TILE_GROUPS
4980 : uint32_t compressed_header_size;
4981 : uint32_t uncompressed_header_size;
4982 : struct aom_write_bit_buffer saved_wb;
4983 : #endif
4984 : uint32_t data_size;
4985 0 : struct aom_write_bit_buffer wb = { data, 0 };
4986 :
4987 : unsigned int max_tile_size;
4988 : unsigned int max_tile_col_size;
4989 :
4990 : #if CONFIG_BITSTREAM_DEBUG
4991 : bitstream_queue_reset_write();
4992 : #endif
4993 :
4994 : #if !CONFIG_TILE_GROUPS
4995 : int tile_size_bytes;
4996 : int tile_col_size_bytes;
4997 : AV1_COMMON *const cm = &cpi->common;
4998 : const int have_tiles = cm->tile_cols * cm->tile_rows > 1;
4999 :
5000 : // Write the uncompressed header
5001 : write_uncompressed_header(cpi, &wb);
5002 :
5003 : #if CONFIG_EXT_REFS
5004 : if (cm->show_existing_frame) {
5005 : *size = aom_wb_bytes_written(&wb);
5006 : return;
5007 : }
5008 : #endif // CONFIG_EXT_REFS
5009 :
5010 : // We do not know these sizes in advance. Output placeholder bits.
5011 : saved_wb = wb;
5012 : // Write tile size magnitudes
5013 : if (have_tiles) {
5014 : // Note that the last item in the uncompressed header is the data
5015 : // describing tile configuration.
5016 : #if CONFIG_EXT_TILE
5017 : // Number of bytes in tile column size - 1
5018 : aom_wb_write_literal(&wb, 0, 2);
5019 : #endif // CONFIG_EXT_TILE
5020 : // Number of bytes in tile size - 1
5021 : aom_wb_write_literal(&wb, 0, 2);
5022 : }
5023 : // Size of compressed header
5024 : aom_wb_write_literal(&wb, 0, 16);
5025 :
5026 : uncompressed_header_size = (uint32_t)aom_wb_bytes_written(&wb);
5027 : data += uncompressed_header_size;
5028 :
5029 : aom_clear_system_state();
5030 :
5031 : // Write the compressed header
5032 : compressed_header_size = write_compressed_header(cpi, data);
5033 : data += compressed_header_size;
5034 :
5035 : // Write the encoded tile data
5036 : data_size = write_tiles(cpi, data, &max_tile_size, &max_tile_col_size);
5037 : #else
5038 0 : data_size = write_tiles(cpi, &wb, &max_tile_size, &max_tile_col_size);
5039 : #endif
5040 : #if !CONFIG_TILE_GROUPS
5041 : if (have_tiles) {
5042 : data_size =
5043 : remux_tiles(cm, data, data_size, max_tile_size, max_tile_col_size,
5044 : &tile_size_bytes, &tile_col_size_bytes);
5045 : }
5046 :
5047 : data += data_size;
5048 :
5049 : // Now fill in the gaps in the uncompressed header.
5050 : if (have_tiles) {
5051 : #if CONFIG_EXT_TILE
5052 : assert(tile_col_size_bytes >= 1 && tile_col_size_bytes <= 4);
5053 : aom_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);
5054 : #endif // CONFIG_EXT_TILE
5055 : assert(tile_size_bytes >= 1 && tile_size_bytes <= 4);
5056 : aom_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
5057 : }
5058 : // TODO(jbb): Figure out what to do if compressed_header_size > 16 bits.
5059 : assert(compressed_header_size <= 0xffff);
5060 : aom_wb_write_literal(&saved_wb, compressed_header_size, 16);
5061 : #else
5062 0 : data += data_size;
5063 : #endif
5064 : #if CONFIG_ANS && ANS_REVERSE
5065 : // Avoid aliasing the superframe index
5066 : *data++ = 0;
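     :   // Editor's note (assumption): a decoder probes the buffer's last
     :   // byte for the 0b110xxxxx superframe-index marker; the zero byte
     :   // appended above keeps a reversed-ANS stream from ending in a byte
     :   // that could be mistaken for that marker.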
5067 : #endif
5068 0 : *size = data - dst;
5069 0 : }