Line data Source code
1 : /*
2 : * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3 : *
4 : * This source code is subject to the terms of the BSD 2 Clause License and
5 : * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 : * was not distributed with this source code in the LICENSE file, you can
7 : * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 : * Media Patent License 1.0 was not distributed with this source code in the
9 : * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 : */
11 :
12 : #ifndef AV1_COMMON_ONYXC_INT_H_
13 : #define AV1_COMMON_ONYXC_INT_H_
14 :
15 : #include "./aom_config.h"
16 : #include "./av1_rtcd.h"
17 : #include "aom/internal/aom_codec_internal.h"
18 : #include "aom_util/aom_thread.h"
19 : #if CONFIG_ANS
20 : #include "aom_dsp/ans.h"
21 : #endif
22 : #include "av1/common/alloccommon.h"
23 : #include "av1/common/av1_loopfilter.h"
24 : #include "av1/common/entropy.h"
25 : #include "av1/common/entropymode.h"
26 : #include "av1/common/entropymv.h"
27 : #include "av1/common/frame_buffers.h"
28 : #include "av1/common/mv.h"
29 : #include "av1/common/quant_common.h"
30 : #if CONFIG_LOOP_RESTORATION
31 : #include "av1/common/restoration.h"
32 : #endif // CONFIG_LOOP_RESTORATION
33 : #include "av1/common/tile_common.h"
34 : #include "av1/common/odintrin.h"
35 : #if CONFIG_PVQ
36 : #include "av1/common/pvq.h"
37 : #endif
38 : #if CONFIG_CFL
39 : #include "av1/common/cfl.h"
40 : #endif
41 : #ifdef __cplusplus
42 : extern "C" {
43 : #endif
44 :
45 : #define CDEF_MAX_STRENGTHS 16
46 :
47 : #define REF_FRAMES_LOG2 3
48 : #define REF_FRAMES (1 << REF_FRAMES_LOG2)
49 :
50 : // 4 scratch frames for the new frames, to support a maximum of 4 cores
51 : // decoding in parallel, plus 3 for scaled references on the encoder.
52 : // TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number
53 : // of framebuffers.
54 : // TODO(jkoleszar): These 3 extra references could probably come from the
55 : // normal reference pool.
56 : #define FRAME_BUFFERS (REF_FRAMES + 7)
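
// Illustrative arithmetic (a sketch, not from the original source): with
// REF_FRAMES = 1 << 3 = 8, FRAME_BUFFERS = 8 + 7 = 15, i.e. the 8 reference
// slots plus the 4 decoder scratch frames and 3 encoder scaled-reference
// frames described above.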
57 :
58 : #if CONFIG_REFERENCE_BUFFER
59 : /* Constant values while waiting for the sequence header */
60 : #define FRAME_ID_NUMBERS_PRESENT_FLAG 1
61 : #define FRAME_ID_LENGTH_MINUS7 8 // Allows frame id up to 2^15-1
62 : #define DELTA_FRAME_ID_LENGTH_MINUS2 12 // Allows frame id deltas up to 2^14-1
63 : #endif
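
// Illustrative arithmetic (assuming the constant values above): the frame id
// is coded with FRAME_ID_LENGTH_MINUS7 + 7 = 15 bits, hence ids up to
// 2^15 - 1, and deltas with DELTA_FRAME_ID_LENGTH_MINUS2 + 2 = 14 bits,
// hence deltas up to 2^14 - 1.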
64 :
65 : #if CONFIG_EXT_REFS
66 : #define FRAME_CONTEXTS_LOG2 3
67 : #else
68 : #define FRAME_CONTEXTS_LOG2 2
69 : #endif
70 :
71 : #define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2)
72 :
73 : #define NUM_PING_PONG_BUFFERS 2
74 :
75 : typedef enum {
76 : SINGLE_REFERENCE = 0,
77 : COMPOUND_REFERENCE = 1,
78 : REFERENCE_MODE_SELECT = 2,
79 : REFERENCE_MODES = 3,
80 : } REFERENCE_MODE;
81 :
82 : typedef enum {
83 : RESET_FRAME_CONTEXT_NONE = 0,
84 : RESET_FRAME_CONTEXT_CURRENT = 1,
85 : RESET_FRAME_CONTEXT_ALL = 2,
86 : } RESET_FRAME_CONTEXT_MODE;
87 :
88 : typedef enum {
89 : /**
90 : * Update frame context to values resulting from forward probability
91 : * updates signaled in the frame header
92 : */
93 : REFRESH_FRAME_CONTEXT_FORWARD,
94 : /**
95 : * Update frame context to values resulting from backward probability
96 : * updates based on entropy/counts in the decoded frame
97 : */
98 : REFRESH_FRAME_CONTEXT_BACKWARD,
99 : } REFRESH_FRAME_CONTEXT_MODE;
100 :
101 : typedef struct {
102 : int_mv mv[2];
103 : int_mv pred_mv[2];
104 : MV_REFERENCE_FRAME ref_frame[2];
105 : } MV_REF;
106 :
107 : typedef struct {
108 : int ref_count;
109 : MV_REF *mvs;
110 : int mi_rows;
111 : int mi_cols;
112 : #if CONFIG_GLOBAL_MOTION
113 : WarpedMotionParams global_motion[TOTAL_REFS_PER_FRAME];
114 : #endif // CONFIG_GLOBAL_MOTION
115 : aom_codec_frame_buffer_t raw_frame_buffer;
116 : YV12_BUFFER_CONFIG buf;
117 : #if CONFIG_TEMPMV_SIGNALING
118 : uint8_t intra_only;
119 : #endif
120 : // The following variables are only used in frame-parallel decode.
121 :
122 : // frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
123 : // that no FrameWorker owns, or is decoding, this buffer.
124 : AVxWorker *frame_worker_owner;
125 :
126 : // row and col indicate the position up to which the frame has been decoded,
127 : // in real pixel units. They are reset to -1 when decoding begins and set to
128 : // INT_MAX when the frame is fully decoded.
129 : int row;
130 : int col;
131 : } RefCntBuffer;
132 :
133 : typedef struct BufferPool {
134 : // Protect BufferPool from being accessed by several FrameWorkers at
135 : // the same time during frame parallel decode.
136 : // TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
137 : #if CONFIG_MULTITHREAD
138 : pthread_mutex_t pool_mutex;
139 : #endif
140 :
141 : // Private data associated with the frame buffer callbacks.
142 : void *cb_priv;
143 :
144 : aom_get_frame_buffer_cb_fn_t get_fb_cb;
145 : aom_release_frame_buffer_cb_fn_t release_fb_cb;
146 :
147 : RefCntBuffer frame_bufs[FRAME_BUFFERS];
148 :
149 : // Frame buffers allocated internally by the codec.
150 : InternalFrameBufferList int_frame_buffers;
151 : } BufferPool;
152 :
153 : typedef struct AV1Common {
154 : struct aom_internal_error_info error;
155 : aom_color_space_t color_space;
156 : int color_range;
157 : int width;
158 : int height;
159 : int render_width;
160 : int render_height;
161 : int last_width;
162 : int last_height;
163 :
164 : // TODO(jkoleszar): this implies chroma subsampling right now, but could vary
165 : // per plane. Revisit as part of the future change to YV12_BUFFER_CONFIG to
166 : // support additional planes.
167 : int subsampling_x;
168 : int subsampling_y;
169 :
170 : #if CONFIG_HIGHBITDEPTH
171 : // Marks whether 16-bit frame buffers are needed (1: yes, 0: no).
172 : int use_highbitdepth;
173 : #endif
174 : YV12_BUFFER_CONFIG *frame_to_show;
175 : RefCntBuffer *prev_frame;
176 :
177 : // TODO(hkuang): Combine this with cur_buf in macroblockd.
178 : RefCntBuffer *cur_frame;
179 :
180 : int ref_frame_map[REF_FRAMES]; /* maps reference slot to frame buffer index */
181 :
182 : // Prepare ref_frame_map for the next frame.
183 : // Only used in frame parallel decode.
184 : int next_ref_frame_map[REF_FRAMES];
185 :
186 : // TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and
187 : // roll new_fb_idx into it.
188 :
189 : // Each Inter frame can reference INTER_REFS_PER_FRAME buffers
190 : RefBuffer frame_refs[INTER_REFS_PER_FRAME];
191 :
192 : int new_fb_idx;
193 :
194 : FRAME_TYPE last_frame_type; /* last frame's frame type for motion search.*/
195 : FRAME_TYPE frame_type;
196 :
197 : int show_frame;
198 : int last_show_frame;
199 : int show_existing_frame;
200 : #if CONFIG_EXT_REFS
201 : // Flag for a frame used as a reference - not written to the bitstream
202 : int is_reference_frame;
203 : #endif // CONFIG_EXT_REFS
204 :
205 : // Flag signaling that the frame is encoded using only INTRA modes.
206 : uint8_t intra_only;
207 : uint8_t last_intra_only;
208 :
209 : int allow_high_precision_mv;
210 :
211 : #if CONFIG_PALETTE || CONFIG_INTRABC
212 : int allow_screen_content_tools;
213 : #endif // CONFIG_PALETTE || CONFIG_INTRABC
214 : #if CONFIG_EXT_INTER
215 : #if CONFIG_INTERINTRA
216 : int allow_interintra_compound;
217 : #endif // CONFIG_INTERINTRA
218 : #if CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
219 : int allow_masked_compound;
220 : #endif // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
221 : #endif // CONFIG_EXT_INTER
222 :
223 : // Flag signaling which frame contexts should be reset to default values.
224 : RESET_FRAME_CONTEXT_MODE reset_frame_context;
225 :
226 : // MBs and mb_rows/cols are in 16-pixel units; mi_rows/cols are in
227 : // MODE_INFO (8-pixel) units.
228 : int MBs;
229 : int mb_rows, mi_rows;
230 : int mb_cols, mi_cols;
231 : int mi_stride;
232 :
233 : /* profile settings */
234 : TX_MODE tx_mode;
235 :
236 : int base_qindex;
237 : int y_dc_delta_q;
238 : int uv_dc_delta_q;
239 : int uv_ac_delta_q;
240 : int16_t y_dequant[MAX_SEGMENTS][2];
241 : int16_t uv_dequant[MAX_SEGMENTS][2];
242 :
243 : #if CONFIG_AOM_QM
244 : // Global quant matrix tables
245 : qm_val_t *giqmatrix[NUM_QM_LEVELS][2][2][TX_SIZES_ALL];
246 : qm_val_t *gqmatrix[NUM_QM_LEVELS][2][2][TX_SIZES_ALL];
247 :
248 : // Local quant matrix tables for each frame
249 : qm_val_t *y_iqmatrix[MAX_SEGMENTS][2][TX_SIZES_ALL];
250 : qm_val_t *uv_iqmatrix[MAX_SEGMENTS][2][TX_SIZES_ALL];
251 : // Encoder
252 : qm_val_t *y_qmatrix[MAX_SEGMENTS][2][TX_SIZES_ALL];
253 : qm_val_t *uv_qmatrix[MAX_SEGMENTS][2][TX_SIZES_ALL];
254 :
255 : int using_qmatrix;
256 : int min_qmlevel;
257 : int max_qmlevel;
258 : #endif
259 : #if CONFIG_NEW_QUANT
260 : dequant_val_type_nuq y_dequant_nuq[MAX_SEGMENTS][QUANT_PROFILES][COEF_BANDS];
261 : dequant_val_type_nuq uv_dequant_nuq[MAX_SEGMENTS][QUANT_PROFILES][COEF_BANDS];
262 : #endif
263 :
264 : /* We allocate a MODE_INFO struct for each macroblock, together with
265 : an extra row on top and column on the left to simplify prediction. */
266 : int mi_alloc_size;
267 : MODE_INFO *mip; /* Base of allocated array */
268 : MODE_INFO *mi; /* Corresponds to upper left visible macroblock */
269 :
270 : // TODO(agrange): Move prev_mi into encoder structure.
271 : // prev_mip and prev_mi will only be allocated in encoder.
272 : MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
273 : MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
274 :
275 : // Separate mi functions between encoder and decoder.
276 : int (*alloc_mi)(struct AV1Common *cm, int mi_size);
277 : void (*free_mi)(struct AV1Common *cm);
278 : void (*setup_mi)(struct AV1Common *cm);
279 :
280 : // Grid of pointers to 8x8 MODE_INFO structs. Any 8x8 not in the visible
281 : // area will be NULL.
282 : MODE_INFO **mi_grid_base;
283 : MODE_INFO **mi_grid_visible;
284 : MODE_INFO **prev_mi_grid_base;
285 : MODE_INFO **prev_mi_grid_visible;
286 :
287 : // Whether to use previous frame's motion vectors for prediction.
288 : int use_prev_frame_mvs;
289 :
290 : // Persistent mb segment id map used in prediction.
291 : int seg_map_idx;
292 : int prev_seg_map_idx;
293 :
294 : uint8_t *seg_map_array[NUM_PING_PONG_BUFFERS];
295 : uint8_t *last_frame_seg_map;
296 : uint8_t *current_frame_seg_map;
297 : int seg_map_alloc_size;
298 :
299 : InterpFilter interp_filter;
300 :
301 : loop_filter_info_n lf_info;
302 : #if CONFIG_FRAME_SUPERRES
303 : // The numerator of the superres scale; the denominator is fixed.
304 : uint8_t superres_scale_numerator;
305 : #endif // CONFIG_FRAME_SUPERRES
306 : #if CONFIG_LOOP_RESTORATION
307 : RestorationInfo rst_info[MAX_MB_PLANE];
308 : RestorationInternal rst_internal;
309 : #endif // CONFIG_LOOP_RESTORATION
310 :
311 : // Flag signaling how frame contexts should be updated at the end of
312 : // a frame decode
313 : REFRESH_FRAME_CONTEXT_MODE refresh_frame_context;
314 :
315 : int ref_frame_sign_bias[TOTAL_REFS_PER_FRAME]; /* Two states: 0 or 1 */
316 :
317 : struct loopfilter lf;
318 : struct segmentation seg;
319 :
320 : int frame_parallel_decode; // frame-based threading.
321 :
322 : #if CONFIG_EXT_TX
323 : int reduced_tx_set_used;
324 : #endif // CONFIG_EXT_TX
325 :
326 : // Context probabilities for reference frame prediction
327 : #if CONFIG_EXT_REFS
328 : MV_REFERENCE_FRAME comp_fwd_ref[FWD_REFS];
329 : MV_REFERENCE_FRAME comp_bwd_ref[BWD_REFS];
330 : #else
331 : MV_REFERENCE_FRAME comp_fixed_ref;
332 : MV_REFERENCE_FRAME comp_var_ref[COMP_REFS];
333 : #endif // CONFIG_EXT_REFS
334 : REFERENCE_MODE reference_mode;
335 :
336 : FRAME_CONTEXT *fc; /* this frame entropy */
337 : FRAME_CONTEXT *frame_contexts; // FRAME_CONTEXTS
338 : FRAME_CONTEXT *pre_fc; // Context referenced in this frame
339 : unsigned int frame_context_idx; /* Context to use/update */
340 : FRAME_COUNTS counts;
341 :
342 : unsigned int current_video_frame;
343 : BITSTREAM_PROFILE profile;
344 :
345 : // AOM_BITS_8 in profile 0 or 1, AOM_BITS_10 or AOM_BITS_12 in profile 2 or 3.
346 : aom_bit_depth_t bit_depth;
347 : aom_bit_depth_t dequant_bit_depth; // bit_depth of current dequantizer
348 :
349 : int error_resilient_mode;
350 :
351 : #if !CONFIG_EXT_TILE
352 : int log2_tile_cols, log2_tile_rows;
353 : #endif // !CONFIG_EXT_TILE
354 : int tile_cols, tile_rows;
355 : int tile_width, tile_height; // In MI units
356 : #if CONFIG_EXT_TILE
357 : unsigned int tile_encoding_mode;
358 : #endif // CONFIG_EXT_TILE
359 :
360 : #if CONFIG_DEPENDENT_HORZTILES
361 : int dependent_horz_tiles;
362 : #if CONFIG_TILE_GROUPS
363 : int tile_group_start_row[MAX_TILE_ROWS][MAX_TILE_COLS];
364 : int tile_group_start_col[MAX_TILE_ROWS][MAX_TILE_COLS];
365 : #endif
366 : #endif
367 : #if CONFIG_LOOPFILTERING_ACROSS_TILES
368 : int loop_filter_across_tiles_enabled;
369 : #endif // CONFIG_LOOPFILTERING_ACROSS_TILES
370 :
371 : int byte_alignment;
372 : int skip_loop_filter;
373 :
374 : // Private data associated with the frame buffer callbacks.
375 : void *cb_priv;
376 : aom_get_frame_buffer_cb_fn_t get_fb_cb;
377 : aom_release_frame_buffer_cb_fn_t release_fb_cb;
378 :
379 : // Handles memory for the codec.
380 : InternalFrameBufferList int_frame_buffers;
381 :
382 : // External BufferPool passed from outside.
383 : BufferPool *buffer_pool;
384 :
385 : PARTITION_CONTEXT *above_seg_context;
386 : ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
387 : #if CONFIG_VAR_TX
388 : TXFM_CONTEXT *above_txfm_context;
389 : TXFM_CONTEXT *top_txfm_context[MAX_MB_PLANE];
390 : TXFM_CONTEXT left_txfm_context[MAX_MB_PLANE][2 * MAX_MIB_SIZE];
391 : #endif
392 : int above_context_alloc_cols;
393 :
394 : // Scratch memory for intra-only/keyframe forward updates from the default
395 : // tables - this is intentionally not placed in FRAME_CONTEXT since it is
396 : // reset upon each keyframe and not used afterwards.
397 : aom_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
398 : #if CONFIG_GLOBAL_MOTION
399 : WarpedMotionParams global_motion[TOTAL_REFS_PER_FRAME];
400 : #endif
401 :
402 : BLOCK_SIZE sb_size; // Size of the superblock used for this frame
403 : int mib_size; // Size of the superblock in units of MI blocks
404 : int mib_size_log2; // Log 2 of above.
405 : #if CONFIG_CDEF
406 : int cdef_dering_damping;
407 : int cdef_clpf_damping;
408 : int nb_cdef_strengths;
409 : int cdef_strengths[CDEF_MAX_STRENGTHS];
410 : int cdef_uv_strengths[CDEF_MAX_STRENGTHS];
411 : int cdef_bits;
412 : #endif
413 :
414 : #if CONFIG_DELTA_Q
415 : int delta_q_present_flag;
416 : // Resolution of delta quant
417 : int delta_q_res;
418 : #if CONFIG_EXT_DELTA_Q
419 : int delta_lf_present_flag;
420 : // Resolution of delta lf level
421 : int delta_lf_res;
422 : #endif
423 : #endif
424 : #if CONFIG_TILE_GROUPS
425 : int num_tg;
426 : #endif
427 : #if CONFIG_REFERENCE_BUFFER
428 : int current_frame_id;
429 : int ref_frame_id[REF_FRAMES];
430 : int valid_for_referencing[REF_FRAMES];
431 : int refresh_mask;
432 : int invalid_delta_frame_id_minus1;
433 : #endif
434 : #if CONFIG_ANS && ANS_MAX_SYMBOLS
435 : int ans_window_size_log2;
436 : #endif
437 : } AV1_COMMON;
438 :
439 : #if CONFIG_REFERENCE_BUFFER
440 : /* Initial version of sequence header structure */
441 : typedef struct SequenceHeader {
442 : int frame_id_numbers_present_flag;
443 : int frame_id_length_minus7;
444 : int delta_frame_id_length_minus2;
445 : } SequenceHeader;
446 : #endif
447 :
448 : // TODO(hkuang): Don't need to lock the whole pool after implementing atomic
449 : // frame reference count.
450 0 : static void lock_buffer_pool(BufferPool *const pool) {
451 : #if CONFIG_MULTITHREAD
452 0 : pthread_mutex_lock(&pool->pool_mutex);
453 : #else
454 : (void)pool;
455 : #endif
456 0 : }
457 :
458 0 : static void unlock_buffer_pool(BufferPool *const pool) {
459 : #if CONFIG_MULTITHREAD
460 0 : pthread_mutex_unlock(&pool->pool_mutex);
461 : #else
462 : (void)pool;
463 : #endif
464 0 : }
465 :
466 0 : static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
467 0 : if (index < 0 || index >= REF_FRAMES) return NULL;
468 0 : if (cm->ref_frame_map[index] < 0) return NULL;
469 0 : assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
470 0 : return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
471 : }
472 :
473 0 : static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(
474 : const AV1_COMMON *const cm) {
475 0 : return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
476 : }
477 :
478 0 : static INLINE int get_free_fb(AV1_COMMON *cm) {
479 0 : RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
480 : int i;
481 :
482 0 : lock_buffer_pool(cm->buffer_pool);
483 0 : for (i = 0; i < FRAME_BUFFERS; ++i)
484 0 : if (frame_bufs[i].ref_count == 0) break;
485 :
486 0 : if (i != FRAME_BUFFERS) {
487 0 : frame_bufs[i].ref_count = 1;
488 : } else {
489 : // Set i to INVALID_IDX to indicate that no free buffer was found.
490 0 : i = INVALID_IDX;
491 : }
492 :
493 0 : unlock_buffer_pool(cm->buffer_pool);
494 0 : return i;
495 : }
496 :
497 0 : static INLINE void ref_cnt_fb(RefCntBuffer *bufs, int *idx, int new_idx) {
498 0 : const int ref_index = *idx;
499 :
500 0 : if (ref_index >= 0 && bufs[ref_index].ref_count > 0)
501 0 : bufs[ref_index].ref_count--;
502 :
503 0 : *idx = new_idx;
504 :
505 0 : bufs[new_idx].ref_count++;
506 0 : }
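
// Usage sketch (illustrative only; the surrounding control flow and the
// slot index `ref` are assumptions, not part of this header): a decoder
// starting work on a new frame would typically pair the two helpers above
// roughly as follows.
//
//   cm->new_fb_idx = get_free_fb(cm);
//   if (cm->new_fb_idx == INVALID_IDX) {
//     // No free buffer available: report an allocation failure.
//   }
//   // ... decode into cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf ...
//   // Retarget reference slot `ref` at the new frame, dropping one
//   // reference on whatever buffer that slot held before:
//   ref_cnt_fb(cm->buffer_pool->frame_bufs, &cm->ref_frame_map[ref],
//              cm->new_fb_idx);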
507 :
508 0 : static INLINE int mi_cols_aligned_to_sb(const AV1_COMMON *cm) {
509 0 : return ALIGN_POWER_OF_TWO(cm->mi_cols, cm->mib_size_log2);
510 : }
511 :
512 0 : static INLINE int mi_rows_aligned_to_sb(const AV1_COMMON *cm) {
513 0 : return ALIGN_POWER_OF_TWO(cm->mi_rows, cm->mib_size_log2);
514 : }
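
// Worked example (illustrative, assuming CONFIG_CB4X4 so MI units are 4x4
// and a 64x64 superblock gives mib_size_log2 = 4): for cm->mi_cols = 150,
// ALIGN_POWER_OF_TWO(150, 4) = 160, i.e. the MI columns rounded up to a
// whole number of superblocks.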
515 :
516 0 : static INLINE int frame_is_intra_only(const AV1_COMMON *const cm) {
517 0 : return cm->frame_type == KEY_FRAME || cm->intra_only;
518 : }
519 :
520 0 : static INLINE void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd,
521 : #if CONFIG_PVQ
522 : tran_low_t *pvq_ref_coeff,
523 : #endif
524 : #if CONFIG_CFL
525 : CFL_CTX *cfl,
526 : #endif
527 : tran_low_t *dqcoeff) {
528 : int i;
529 0 : for (i = 0; i < MAX_MB_PLANE; ++i) {
530 0 : xd->plane[i].dqcoeff = dqcoeff;
531 : #if CONFIG_PVQ
532 : xd->plane[i].pvq_ref_coeff = pvq_ref_coeff;
533 : #endif
534 : #if CONFIG_CFL
535 : xd->cfl = cfl;
536 : cfl_init(cfl, cm, xd->plane[AOM_PLANE_U].subsampling_x,
537 : xd->plane[AOM_PLANE_U].subsampling_y);
538 : #endif
539 0 : xd->above_context[i] = cm->above_context[i];
540 0 : if (xd->plane[i].plane_type == PLANE_TYPE_Y) {
541 0 : memcpy(xd->plane[i].seg_dequant, cm->y_dequant, sizeof(cm->y_dequant));
542 : #if CONFIG_AOM_QM
543 : memcpy(xd->plane[i].seg_iqmatrix, cm->y_iqmatrix, sizeof(cm->y_iqmatrix));
544 : #endif
545 :
546 : #if CONFIG_NEW_QUANT
547 : memcpy(xd->plane[i].seg_dequant_nuq, cm->y_dequant_nuq,
548 : sizeof(cm->y_dequant_nuq));
549 : #endif
550 : } else {
551 0 : memcpy(xd->plane[i].seg_dequant, cm->uv_dequant, sizeof(cm->uv_dequant));
552 : #if CONFIG_AOM_QM
553 : memcpy(xd->plane[i].seg_iqmatrix, cm->uv_iqmatrix,
554 : sizeof(cm->uv_iqmatrix));
555 : #endif
556 : #if CONFIG_NEW_QUANT
557 : memcpy(xd->plane[i].seg_dequant_nuq, cm->uv_dequant_nuq,
558 : sizeof(cm->uv_dequant_nuq));
559 : #endif
560 : }
561 0 : xd->fc = cm->fc;
562 : }
563 0 : xd->above_seg_context = cm->above_seg_context;
564 : #if CONFIG_VAR_TX
565 0 : xd->above_txfm_context = cm->above_txfm_context;
566 : #endif
567 0 : xd->mi_stride = cm->mi_stride;
568 0 : xd->error_info = &cm->error;
569 0 : }
570 :
571 0 : static INLINE void set_skip_context(MACROBLOCKD *xd, int mi_row, int mi_col) {
572 : int i;
573 0 : int row_offset = mi_row;
574 0 : int col_offset = mi_col;
575 0 : for (i = 0; i < MAX_MB_PLANE; ++i) {
576 0 : struct macroblockd_plane *const pd = &xd->plane[i];
577 : #if CONFIG_CHROMA_SUB8X8
578 0 : if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
579 : // Offset the buffer pointer
580 0 : if (pd->subsampling_y && (mi_row & 0x01)) row_offset = mi_row - 1;
581 0 : if (pd->subsampling_x && (mi_col & 0x01)) col_offset = mi_col - 1;
582 : }
583 : #endif
584 0 : int above_idx = col_offset << (MI_SIZE_LOG2 - tx_size_wide_log2[0]);
585 0 : int left_idx = (row_offset & MAX_MIB_MASK)
586 0 : << (MI_SIZE_LOG2 - tx_size_high_log2[0]);
587 0 : pd->above_context = &xd->above_context[i][above_idx >> pd->subsampling_x];
588 0 : pd->left_context = &xd->left_context[i][left_idx >> pd->subsampling_y];
589 : }
590 0 : }
591 :
592 0 : static INLINE int calc_mi_size(int len) {
593 : // len is in mi units.
594 0 : return len + MAX_MIB_SIZE;
595 : }
596 :
597 0 : static INLINE void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh) {
598 : int i;
599 0 : for (i = 0; i < MAX_MB_PLANE; i++) {
600 0 : xd->plane[i].n4_w = (bw << 1) >> xd->plane[i].subsampling_x;
601 0 : xd->plane[i].n4_h = (bh << 1) >> xd->plane[i].subsampling_y;
602 :
603 0 : xd->plane[i].width = (bw * MI_SIZE) >> xd->plane[i].subsampling_x;
604 0 : xd->plane[i].height = (bh * MI_SIZE) >> xd->plane[i].subsampling_y;
605 :
606 : #if !CONFIG_CHROMA_2X2
607 0 : xd->plane[i].width = AOMMAX(xd->plane[i].width, 4);
608 0 : xd->plane[i].height = AOMMAX(xd->plane[i].height, 4);
609 : #endif
610 : }
611 0 : }
612 :
613 0 : static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
614 : int mi_row, int bh, int mi_col, int bw,
615 : #if CONFIG_DEPENDENT_HORZTILES
616 : int dependent_horz_tile_flag,
617 : #endif // CONFIG_DEPENDENT_HORZTILES
618 : int mi_rows, int mi_cols) {
619 0 : xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
620 0 : xd->mb_to_bottom_edge = ((mi_rows - bh - mi_row) * MI_SIZE) * 8;
621 0 : xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
622 0 : xd->mb_to_right_edge = ((mi_cols - bw - mi_col) * MI_SIZE) * 8;
623 :
624 : #if CONFIG_DEPENDENT_HORZTILES
625 : if (dependent_horz_tile_flag) {
626 : #if CONFIG_TILE_GROUPS
627 : xd->up_available = (mi_row > tile->mi_row_start) || !tile->tg_horz_boundary;
628 : #else
629 : xd->up_available = (mi_row > 0);
630 : #endif // CONFIG_TILE_GROUPS
631 : } else {
632 : #endif // CONFIG_DEPENDENT_HORZTILES
633 : // Are edges available for intra prediction?
634 0 : xd->up_available = (mi_row > tile->mi_row_start);
635 : #if CONFIG_DEPENDENT_HORZTILES
636 : }
637 : #endif // CONFIG_DEPENDENT_HORZTILES
638 :
639 0 : xd->left_available = (mi_col > tile->mi_col_start);
640 : #if CONFIG_CHROMA_SUB8X8
641 0 : xd->chroma_up_available = xd->up_available;
642 0 : xd->chroma_left_available = xd->left_available;
643 0 : if (xd->plane[1].subsampling_x && bw < mi_size_wide[BLOCK_8X8])
644 0 : xd->chroma_left_available = (mi_col - 1) > tile->mi_col_start;
645 0 : if (xd->plane[1].subsampling_y && bh < mi_size_high[BLOCK_8X8])
646 0 : xd->chroma_up_available = (mi_row - 1) > tile->mi_row_start;
647 : #endif
648 0 : if (xd->up_available) {
649 0 : xd->above_mi = xd->mi[-xd->mi_stride];
650 : // above_mi may be NULL in encoder's first pass.
651 0 : xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL;
652 : } else {
653 0 : xd->above_mi = NULL;
654 0 : xd->above_mbmi = NULL;
655 : }
656 :
657 0 : if (xd->left_available) {
658 0 : xd->left_mi = xd->mi[-1];
659 : // left_mi may be NULL in encoder's first pass.
660 0 : xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL;
661 : } else {
662 0 : xd->left_mi = NULL;
663 0 : xd->left_mbmi = NULL;
664 : }
665 :
666 0 : xd->n8_h = bh;
667 0 : xd->n8_w = bw;
668 0 : xd->is_sec_rect = 0;
669 0 : if (xd->n8_w < xd->n8_h)
670 0 : if (mi_col & (xd->n8_h - 1)) xd->is_sec_rect = 1;
671 :
672 0 : if (xd->n8_w > xd->n8_h)
673 0 : if (mi_row & (xd->n8_w - 1)) xd->is_sec_rect = 1;
674 0 : }
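
// Note (illustrative): the mb_to_*_edge values set above are distances from
// the block to the frame edges in 1/8-pel units (MI_SIZE pixels times 8).
// For example, with MI_SIZE = 4 and mi_row = 10, mb_to_top_edge is
// -(10 * 4) * 8 = -320, i.e. the block's top edge lies 40 pixels below the
// top of the frame.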
675 :
676 : static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
677 : const MODE_INFO *mi,
678 : const MODE_INFO *above_mi,
679 : const MODE_INFO *left_mi,
680 : int block) {
681 : const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
682 : const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
683 : return cm->kf_y_prob[above][left];
684 : }
685 :
686 0 : static INLINE aom_cdf_prob *get_y_mode_cdf(FRAME_CONTEXT *tile_ctx,
687 : const MODE_INFO *mi,
688 : const MODE_INFO *above_mi,
689 : const MODE_INFO *left_mi,
690 : int block) {
691 0 : const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
692 0 : const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
693 0 : return tile_ctx->kf_y_cdf[above][left];
694 : }
695 :
696 0 : static INLINE void update_partition_context(MACROBLOCKD *xd, int mi_row,
697 : int mi_col, BLOCK_SIZE subsize,
698 : BLOCK_SIZE bsize) {
699 0 : PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col;
700 0 : PARTITION_CONTEXT *const left_ctx =
701 0 : xd->left_seg_context + (mi_row & MAX_MIB_MASK);
702 :
703 : #if CONFIG_EXT_PARTITION_TYPES
704 : const int bw = mi_size_wide[bsize];
705 : const int bh = mi_size_high[bsize];
706 : memset(above_ctx, partition_context_lookup[subsize].above, bw);
707 : memset(left_ctx, partition_context_lookup[subsize].left, bh);
708 : #else
709 : // num_4x4_blocks_wide_lookup[bsize] / 2
710 0 : const int bs = mi_size_wide[bsize];
711 :
712 : // Update the partition context at the end nodes. Set the partition bits
713 : // of block sizes larger than the current one to one, and the partition
714 : // bits of smaller block sizes to zero.
715 0 : memset(above_ctx, partition_context_lookup[subsize].above, bs);
716 0 : memset(left_ctx, partition_context_lookup[subsize].left, bs);
717 : #endif // CONFIG_EXT_PARTITION_TYPES
718 0 : }
719 :
720 : #if CONFIG_CB4X4
721 0 : static INLINE int is_chroma_reference(int mi_row, int mi_col, BLOCK_SIZE bsize,
722 : int subsampling_x, int subsampling_y) {
723 : #if CONFIG_CHROMA_2X2
724 : return 1;
725 : #endif
726 :
727 : #if CONFIG_CHROMA_SUB8X8
728 0 : const int bw = mi_size_wide[bsize];
729 0 : const int bh = mi_size_high[bsize];
730 :
731 0 : int ref_pos = ((mi_row & 0x01) || !(bh & 0x01) || !subsampling_y) &&
732 0 : ((mi_col & 0x01) || !(bw & 0x01) || !subsampling_x);
733 :
734 0 : return ref_pos;
735 : #else
736 : int ref_pos = !(((mi_row & 0x01) && subsampling_y) ||
737 : ((mi_col & 0x01) && subsampling_x));
738 :
739 : if (bsize >= BLOCK_8X8) ref_pos = 1;
740 :
741 : return ref_pos;
742 : #endif
743 : }
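
// Worked example (illustrative, for the CONFIG_CHROMA_SUB8X8 branch above):
// for a 4x4 luma block with 4:2:0 subsampling (subsampling_x =
// subsampling_y = 1), bw = bh = 1, so only the bottom-right 4x4 of each 8x8
// region carries the chroma information: (mi_row, mi_col) = (0, 0) returns
// 0, while (1, 1) returns 1.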
744 :
745 0 : static INLINE BLOCK_SIZE scale_chroma_bsize(BLOCK_SIZE bsize, int subsampling_x,
746 : int subsampling_y) {
747 0 : BLOCK_SIZE bs = bsize;
748 :
749 0 : if (bs < BLOCK_8X8) {
750 0 : if (subsampling_x == 1 && subsampling_y == 1)
751 0 : bs = BLOCK_8X8;
752 0 : else if (subsampling_x == 1)
753 0 : bs = BLOCK_8X4;
754 0 : else if (subsampling_y == 1)
755 0 : bs = BLOCK_4X8;
756 : }
757 :
758 0 : return bs;
759 : }
760 : #endif
761 :
762 : #if CONFIG_EXT_PARTITION_TYPES
763 : static INLINE void update_ext_partition_context(MACROBLOCKD *xd, int mi_row,
764 : int mi_col, BLOCK_SIZE subsize,
765 : BLOCK_SIZE bsize,
766 : PARTITION_TYPE partition) {
767 : if (bsize >= BLOCK_8X8) {
768 : const int hbs = mi_size_wide[bsize] / 2;
769 : BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
770 : switch (partition) {
771 : case PARTITION_SPLIT:
772 : if (bsize != BLOCK_8X8) break;
773 : case PARTITION_NONE:
774 : case PARTITION_HORZ:
775 : case PARTITION_VERT:
776 : update_partition_context(xd, mi_row, mi_col, subsize, bsize);
777 : break;
778 : case PARTITION_HORZ_A:
779 : update_partition_context(xd, mi_row, mi_col, bsize2, subsize);
780 : update_partition_context(xd, mi_row + hbs, mi_col, subsize, subsize);
781 : break;
782 : case PARTITION_HORZ_B:
783 : update_partition_context(xd, mi_row, mi_col, subsize, subsize);
784 : update_partition_context(xd, mi_row + hbs, mi_col, bsize2, subsize);
785 : break;
786 : case PARTITION_VERT_A:
787 : update_partition_context(xd, mi_row, mi_col, bsize2, subsize);
788 : update_partition_context(xd, mi_row, mi_col + hbs, subsize, subsize);
789 : break;
790 : case PARTITION_VERT_B:
791 : update_partition_context(xd, mi_row, mi_col, subsize, subsize);
792 : update_partition_context(xd, mi_row, mi_col + hbs, bsize2, subsize);
793 : break;
794 : default: assert(0 && "Invalid partition type");
795 : }
796 : }
797 : }
798 : #endif // CONFIG_EXT_PARTITION_TYPES
799 :
800 0 : static INLINE int partition_plane_context(const MACROBLOCKD *xd, int mi_row,
801 : int mi_col,
802 : #if CONFIG_UNPOISON_PARTITION_CTX
803 : int has_rows, int has_cols,
804 : #endif
805 : BLOCK_SIZE bsize) {
806 : #if CONFIG_UNPOISON_PARTITION_CTX
807 : const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
808 : const PARTITION_CONTEXT *left_ctx =
809 : xd->left_seg_context + (mi_row & MAX_MIB_MASK);
810 : // Minimum partition point is 8x8. Offset the bsl accordingly.
811 : const int bsl = mi_width_log2_lookup[bsize] - mi_width_log2_lookup[BLOCK_8X8];
812 : int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;
813 :
814 : assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]);
815 : assert(bsl >= 0);
816 :
817 : if (has_rows && has_cols)
818 : return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
819 : else if (has_rows && !has_cols)
820 : return PARTITION_CONTEXTS_PRIMARY + bsl;
821 : else if (!has_rows && has_cols)
822 : return PARTITION_CONTEXTS_PRIMARY + PARTITION_BLOCK_SIZES + bsl;
823 : else
824 : return PARTITION_CONTEXTS; // Bogus context, forced SPLIT
825 : #else
826 0 : const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
827 0 : const PARTITION_CONTEXT *left_ctx =
828 0 : xd->left_seg_context + (mi_row & MAX_MIB_MASK);
829 : // Minimum partition point is 8x8. Offset the bsl accordingly.
830 0 : const int bsl = mi_width_log2_lookup[bsize] - mi_width_log2_lookup[BLOCK_8X8];
831 0 : int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;
832 :
833 0 : assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]);
834 0 : assert(bsl >= 0);
835 :
836 0 : return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
837 : #endif
838 : }
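
// Worked example (illustrative, for the non-CONFIG_UNPOISON_PARTITION_CTX
// branch): for bsize = BLOCK_16X16, bsl = mi_width_log2_lookup[BLOCK_16X16]
// - mi_width_log2_lookup[BLOCK_8X8] = 1, so with above = 1 and left = 0 the
// context is (0 * 2 + 1) + 1 * PARTITION_PLOFFSET.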
839 :
840 0 : static INLINE int max_block_wide(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
841 : int plane) {
842 0 : int max_blocks_wide = block_size_wide[bsize];
843 0 : const struct macroblockd_plane *const pd = &xd->plane[plane];
844 :
845 0 : if (xd->mb_to_right_edge < 0)
846 0 : max_blocks_wide += xd->mb_to_right_edge >> (3 + pd->subsampling_x);
847 :
848 : // Scale the width to transform-block units.
849 0 : return max_blocks_wide >> tx_size_wide_log2[0];
850 : }
851 :
852 0 : static INLINE int max_block_high(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
853 : int plane) {
854 0 : int max_blocks_high = block_size_high[bsize];
855 0 : const struct macroblockd_plane *const pd = &xd->plane[plane];
856 :
857 0 : if (xd->mb_to_bottom_edge < 0)
858 0 : max_blocks_high += xd->mb_to_bottom_edge >> (3 + pd->subsampling_y);
859 :
860 : // Scale the height to transform-block units.
861 0 : return max_blocks_high >> tx_size_wide_log2[0];
862 : }
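
// Worked example (illustrative, for the two helpers above):
// mb_to_right_edge and mb_to_bottom_edge are in 1/8-pel units, so the shift
// by (3 + subsampling) converts them to pixels in the given plane. A
// 64-wide luma block that overhangs the right frame border by 16 pixels has
// mb_to_right_edge = -128, giving 64 + (-128 >> 3) = 48 pixels, i.e.
// 48 >> tx_size_wide_log2[0] = 12 four-sample transform units (assuming
// tx_size_wide_log2[0] == 2).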
863 :
864 0 : static INLINE void av1_zero_above_context(AV1_COMMON *const cm,
865 : int mi_col_start, int mi_col_end) {
866 0 : const int width = mi_col_end - mi_col_start;
867 0 : const int aligned_width = ALIGN_POWER_OF_TWO(width, cm->mib_size_log2);
868 :
869 0 : const int offset_y = mi_col_start << (MI_SIZE_LOG2 - tx_size_wide_log2[0]);
870 0 : const int width_y = aligned_width << (MI_SIZE_LOG2 - tx_size_wide_log2[0]);
871 0 : const int offset_uv = offset_y >> cm->subsampling_x;
872 0 : const int width_uv = width_y >> cm->subsampling_x;
873 :
874 0 : av1_zero_array(cm->above_context[0] + offset_y, width_y);
875 0 : av1_zero_array(cm->above_context[1] + offset_uv, width_uv);
876 0 : av1_zero_array(cm->above_context[2] + offset_uv, width_uv);
877 :
878 0 : av1_zero_array(cm->above_seg_context + mi_col_start, aligned_width);
879 :
880 : #if CONFIG_VAR_TX
881 0 : av1_zero_array(cm->above_txfm_context + (mi_col_start << TX_UNIT_WIDE_LOG2),
882 : aligned_width << TX_UNIT_WIDE_LOG2);
883 : #endif // CONFIG_VAR_TX
884 0 : }
885 :
886 0 : static INLINE void av1_zero_left_context(MACROBLOCKD *const xd) {
887 0 : av1_zero(xd->left_context);
888 0 : av1_zero(xd->left_seg_context);
889 : #if CONFIG_VAR_TX
890 0 : av1_zero(xd->left_txfm_context_buffer);
891 : #endif
892 0 : }
893 :
894 : #if CONFIG_VAR_TX
895 0 : static INLINE TX_SIZE get_min_tx_size(TX_SIZE tx_size) {
896 0 : assert(tx_size < TX_SIZES_ALL);
897 0 : return txsize_sqr_map[tx_size];
898 : }
899 :
900 0 : static INLINE void set_txfm_ctx(TXFM_CONTEXT *txfm_ctx, uint8_t txs, int len) {
901 : int i;
902 0 : for (i = 0; i < len; ++i) txfm_ctx[i] = txs;
903 0 : }
904 :
905 0 : static INLINE void set_txfm_ctxs(TX_SIZE tx_size, int n8_w, int n8_h, int skip,
906 : const MACROBLOCKD *xd) {
907 0 : uint8_t bw = tx_size_wide[tx_size];
908 0 : uint8_t bh = tx_size_high[tx_size];
909 :
910 0 : if (skip) {
911 0 : bw = n8_w * MI_SIZE;
912 0 : bh = n8_h * MI_SIZE;
913 : }
914 :
915 0 : set_txfm_ctx(xd->above_txfm_context, bw, n8_w << TX_UNIT_WIDE_LOG2);
916 0 : set_txfm_ctx(xd->left_txfm_context, bh, n8_h << TX_UNIT_HIGH_LOG2);
917 0 : }
918 :
919 0 : static INLINE void txfm_partition_update(TXFM_CONTEXT *above_ctx,
920 : TXFM_CONTEXT *left_ctx,
921 : TX_SIZE tx_size, TX_SIZE txb_size) {
922 0 : BLOCK_SIZE bsize = txsize_to_bsize[txb_size];
923 0 : int bh = mi_size_high[bsize] << TX_UNIT_HIGH_LOG2;
924 0 : int bw = mi_size_wide[bsize] << TX_UNIT_WIDE_LOG2;
925 0 : uint8_t txw = tx_size_wide[tx_size];
926 0 : uint8_t txh = tx_size_high[tx_size];
927 : int i;
928 0 : for (i = 0; i < bh; ++i) left_ctx[i] = txh;
929 0 : for (i = 0; i < bw; ++i) above_ctx[i] = txw;
930 0 : }
931 :
932 0 : static INLINE TX_SIZE get_sqr_tx_size(int tx_dim) {
933 : TX_SIZE tx_size;
934 0 : switch (tx_dim) {
935 : #if CONFIG_EXT_PARTITION
936 : case 128:
937 : #endif
938 : case 64:
939 0 : case 32: tx_size = TX_32X32; break;
940 0 : case 16: tx_size = TX_16X16; break;
941 0 : case 8: tx_size = TX_8X8; break;
942 0 : default: tx_size = TX_4X4;
943 : }
944 0 : return tx_size;
945 : }
946 :
947 0 : static INLINE int txfm_partition_context(TXFM_CONTEXT *above_ctx,
948 : TXFM_CONTEXT *left_ctx,
949 : BLOCK_SIZE bsize, TX_SIZE tx_size) {
950 0 : const uint8_t txw = tx_size_wide[tx_size];
951 0 : const uint8_t txh = tx_size_high[tx_size];
952 0 : const int above = *above_ctx < txw;
953 0 : const int left = *left_ctx < txh;
954 0 : int category = TXFM_PARTITION_CONTEXTS - 1;
955 :
956 : // Dummy return value for tx_size <= TX_4X4; callers do not use it.
957 0 : if (tx_size <= TX_4X4) return 0;
958 :
959 0 : TX_SIZE max_tx_size =
960 0 : get_sqr_tx_size(AOMMAX(block_size_wide[bsize], block_size_high[bsize]));
961 :
962 0 : if (max_tx_size >= TX_8X8) {
963 0 : category = (tx_size != max_tx_size && max_tx_size > TX_8X8) +
964 0 : (TX_SIZES - 1 - max_tx_size) * 2;
965 : }
966 0 : if (category == TXFM_PARTITION_CONTEXTS - 1) return category;
967 0 : return category * 3 + above + left;
968 : }
969 : #endif
970 :
971 0 : static INLINE PARTITION_TYPE get_partition(const AV1_COMMON *const cm,
972 : int mi_row, int mi_col,
973 : BLOCK_SIZE bsize) {
974 0 : if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) {
975 0 : return PARTITION_INVALID;
976 : } else {
977 0 : const int offset = mi_row * cm->mi_stride + mi_col;
978 0 : MODE_INFO **mi = cm->mi_grid_visible + offset;
979 0 : const MB_MODE_INFO *const mbmi = &mi[0]->mbmi;
980 0 : const int bsl = b_width_log2_lookup[bsize];
981 0 : const PARTITION_TYPE partition = partition_lookup[bsl][mbmi->sb_type];
982 : #if !CONFIG_EXT_PARTITION_TYPES
983 0 : return partition;
984 : #else
985 : const int hbs = mi_size_wide[bsize] / 2;
986 :
987 : assert(cm->mi_grid_visible[offset] == &cm->mi[offset]);
988 :
989 : if (partition != PARTITION_NONE && bsize > BLOCK_8X8 &&
990 : mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
991 : const BLOCK_SIZE h = get_subsize(bsize, PARTITION_HORZ_A);
992 : const BLOCK_SIZE v = get_subsize(bsize, PARTITION_VERT_A);
993 : const MB_MODE_INFO *const mbmi_right = &mi[hbs]->mbmi;
994 : const MB_MODE_INFO *const mbmi_below = &mi[hbs * cm->mi_stride]->mbmi;
995 : if (mbmi->sb_type == h) {
996 : return mbmi_below->sb_type == h ? PARTITION_HORZ : PARTITION_HORZ_B;
997 : } else if (mbmi->sb_type == v) {
998 : return mbmi_right->sb_type == v ? PARTITION_VERT : PARTITION_VERT_B;
999 : } else if (mbmi_below->sb_type == h) {
1000 : return PARTITION_HORZ_A;
1001 : } else if (mbmi_right->sb_type == v) {
1002 : return PARTITION_VERT_A;
1003 : } else {
1004 : return PARTITION_SPLIT;
1005 : }
1006 : }
1007 :
1008 : return partition;
1009 : #endif // !CONFIG_EXT_PARTITION_TYPES
1010 : }
1011 : }
1012 :
1013 0 : static INLINE void set_sb_size(AV1_COMMON *const cm, BLOCK_SIZE sb_size) {
1014 0 : cm->sb_size = sb_size;
1015 0 : cm->mib_size = mi_size_wide[cm->sb_size];
1016 : #if CONFIG_CB4X4
1017 0 : cm->mib_size_log2 = b_width_log2_lookup[cm->sb_size];
1018 : #else
1019 : cm->mib_size_log2 = mi_width_log2_lookup[cm->sb_size];
1020 : #endif
1021 0 : }
1022 :
1023 : #ifdef __cplusplus
1024 : } // extern "C"
1025 : #endif
1026 :
1027 : #endif // AV1_COMMON_ONYXC_INT_H_
|