LCOV - code coverage report
File: media/libvpx/libvpx/vp8/common/reconinter.c
Test: output.info
Date: 2017-07-14 16:53:18
Coverage: 0.0 % — none of the 266 instrumented lines or 16 functions were hit
(per-line hit counters, all zero, are omitted from the listing below)

/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>
#include <string.h>

#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vpx/vpx_integer.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif

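/* The _c functions below are the generic C reference implementations.
 * The unsuffixed names used elsewhere in this file (vp8_copy_mem16x16,
 * vp8_copy_mem8x8, ...) resolve through the vp8_rtcd dispatch mechanism,
 * which may substitute platform-specific SIMD versions for these
 * fallbacks when CONFIG_RUNTIME_CPU_DETECT is enabled. */
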
void vp8_copy_mem16x16_c(unsigned char *src, int src_stride, unsigned char *dst,
                         int dst_stride) {
  int r;

  for (r = 0; r < 16; ++r) {
    memcpy(dst, src, 16);

    src += src_stride;
    dst += dst_stride;
  }
}

void vp8_copy_mem8x8_c(unsigned char *src, int src_stride, unsigned char *dst,
                       int dst_stride) {
  int r;

  for (r = 0; r < 8; ++r) {
    memcpy(dst, src, 8);

    src += src_stride;
    dst += dst_stride;
  }
}

void vp8_copy_mem8x4_c(unsigned char *src, int src_stride, unsigned char *dst,
                       int dst_stride) {
  int r;

  for (r = 0; r < 4; ++r) {
    memcpy(dst, src, 8);

    src += src_stride;
    dst += dst_stride;
  }
}

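/* Note on motion-vector units (applies to all prediction code below):
 * MV components are stored in 1/8-pel units, so (mv >> 3) is the full-pel
 * offset into the reference plane and (mv & 7) is the sub-pel phase handed
 * to the interpolation filter. For example, mv.col = 21 means 2 full pels
 * to the right plus a 5/8-pel phase. VP8 luma MVs carry quarter-pel
 * precision (even phases); the averaged chroma MVs may take any 1/8-pel
 * phase. */
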
void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
                                  int pre_stride, vp8_subpix_fn_t sppf) {
  int r;
  unsigned char *pred_ptr = d->predictor;
  unsigned char *ptr;
  ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
        (d->bmi.mv.as_mv.col >> 3);

  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
    sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7,
         pred_ptr, pitch);
  } else {
    for (r = 0; r < 4; ++r) {
      pred_ptr[0] = ptr[0];
      pred_ptr[1] = ptr[1];
      pred_ptr[2] = ptr[2];
      pred_ptr[3] = ptr[3];
      pred_ptr += pitch;
      ptr += pre_stride;
    }
  }
}

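/* Usage sketch (illustrative, not a call from this file): predicting one
 * 4x4 luma sub-block into the encoder's predictor buffer,
 *
 *   vp8_build_inter_predictors_b(d, 16, x->pre.y_buffer, x->pre.y_stride,
 *                                x->subpixel_predict);
 *
 * where x->subpixel_predict is a vp8_subpix_fn_t such as
 * vp8_sixtap_predict4x4_c, taking (src, src_stride, xoffset, yoffset,
 * dst, dst_pitch). When both phases are zero the function skips the
 * filter entirely and copies the 4x4 block row by row. */
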
static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d,
                                     unsigned char *dst, int dst_stride,
                                     unsigned char *base_pre, int pre_stride) {
  unsigned char *ptr;
  ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
        (d->bmi.mv.as_mv.col >> 3);

  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
    x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7,
                           d->bmi.mv.as_mv.row & 7, dst, dst_stride);
  } else {
    vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride);
  }
}

static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d,
                                     unsigned char *dst, int dst_stride,
                                     unsigned char *base_pre, int pre_stride) {
  unsigned char *ptr;
  ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
        (d->bmi.mv.as_mv.col >> 3);

  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
    x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7,
                           d->bmi.mv.as_mv.row & 7, dst, dst_stride);
  } else {
    vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride);
  }
}

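/* The 4b/2b helpers above exploit MV coherence: when all four 4x4 blocks
 * of an 8x8 quadrant share one MV, a single 8x8 call replaces four 4x4
 * filter invocations, and when two horizontally adjacent 4x4 blocks share
 * an MV they are predicted together as one 8x4 call. */
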
static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst,
                                     int dst_stride, unsigned char *base_pre,
                                     int pre_stride, vp8_subpix_fn_t sppf) {
  int r;
  unsigned char *ptr;
  ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride +
        (d->bmi.mv.as_mv.col >> 3);

  if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) {
    sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst,
         dst_stride);
  } else {
    for (r = 0; r < 4; ++r) {
      dst[0] = ptr[0];
      dst[1] = ptr[1];
      dst[2] = ptr[2];
      dst[3] = ptr[3];
      dst += dst_stride;
      ptr += pre_stride;
    }
  }
}

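/* The static build_inter_predictors_b above duplicates
 * vp8_build_inter_predictors_b but writes to an arbitrary dst/dst_stride
 * instead of the fixed-pitch d->predictor buffer, letting the 4x4 paths
 * below write straight into the destination frame. */
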
/*encoder only*/
void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x) {
  unsigned char *uptr, *vptr;
  unsigned char *upred_ptr = &x->predictor[256];
  unsigned char *vpred_ptr = &x->predictor[320];

  int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
  int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
  int offset;
  int pre_stride = x->pre.uv_stride;

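  /* The halving below rounds away from zero: (v >> (bits - 1)) is -1 for
   * negative v and 0 otherwise, so (1 | ...) is +1 or -1, i.e. sign(v);
   * adding sign(v) before the truncating divide gives e.g. 3 -> 2 and
   * -3 -> -2. fullpixel_mask is all ones normally and ~7 when the stream
   * restricts motion compensation to full pels, so the & either keeps or
   * clears the sub-pel bits. */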
  /* calc uv motion vectors */
  mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1));
  mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1));
  mv_row /= 2;
  mv_col /= 2;
  mv_row &= x->fullpixel_mask;
  mv_col &= x->fullpixel_mask;

  offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
  uptr = x->pre.u_buffer + offset;
  vptr = x->pre.v_buffer + offset;

  if ((mv_row | mv_col) & 7) {
    x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr,
                           8);
    x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr,
                           8);
  } else {
    vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8);
    vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8);
  }
}

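/* Block index layout used by the 4x4 paths: blocks 0-15 are the luma 4x4s
 * in raster order, 16-19 the U 4x4s and 20-23 the V 4x4s. In the function
 * below, yoffset = i * 8 + j * 2 names the top-left luma block of each 8x8
 * quadrant, and uoffset/voffset name the chroma block covering that
 * quadrant. Each chroma MV is the average of the four co-located luma MVs;
 * temp += 4 + (sign * 8) before the /8 rounds the mean away from zero,
 * e.g. a sum of 20 (mean 2.5) gives 3 and -20 gives -3. */
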
/*encoder only*/
void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) {
  int i, j;
  int pre_stride = x->pre.uv_stride;
  unsigned char *base_pre;

  /* build uv mvs */
  for (i = 0; i < 2; ++i) {
    for (j = 0; j < 2; ++j) {
      int yoffset = i * 8 + j * 2;
      int uoffset = 16 + i * 2 + j;
      int voffset = 20 + i * 2 + j;

      int temp;

      temp = x->block[yoffset].bmi.mv.as_mv.row +
             x->block[yoffset + 1].bmi.mv.as_mv.row +
             x->block[yoffset + 4].bmi.mv.as_mv.row +
             x->block[yoffset + 5].bmi.mv.as_mv.row;

      temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

      x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

      temp = x->block[yoffset].bmi.mv.as_mv.col +
             x->block[yoffset + 1].bmi.mv.as_mv.col +
             x->block[yoffset + 4].bmi.mv.as_mv.col +
             x->block[yoffset + 5].bmi.mv.as_mv.col;

      temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

      x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

      x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
    }
  }

  base_pre = x->pre.u_buffer;
  for (i = 16; i < 20; i += 2) {
    BLOCKD *d0 = &x->block[i];
    BLOCKD *d1 = &x->block[i + 1];

    if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
      build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
    } else {
      vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride,
                                   x->subpixel_predict);
      vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride,
                                   x->subpixel_predict);
    }
  }

  base_pre = x->pre.v_buffer;
  for (i = 20; i < 24; i += 2) {
    BLOCKD *d0 = &x->block[i];
    BLOCKD *d1 = &x->block[i + 1];

    if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
      build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
    } else {
      vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride,
                                   x->subpixel_predict);
      vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride,
                                   x->subpixel_predict);
    }
  }
}

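/* vp8_build_inter16x16_predictors_mby below is the luma-only counterpart
 * of the mbuv function above; the encoder calls it when only the Y
 * prediction is needed, e.g. while evaluating candidate modes. The
 * ((mv_row | mv_col) & 7) test ORs the two components so that a single
 * compare detects a sub-pel phase in either one. */
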
/*encoder only*/
void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *dst_y,
                                         int dst_ystride) {
  unsigned char *ptr_base;
  unsigned char *ptr;
  int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
  int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
  int pre_stride = x->pre.y_stride;

  ptr_base = x->pre.y_buffer;
  ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

  if ((mv_row | mv_col) & 7) {
    x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y,
                             dst_ystride);
  } else {
    vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
  }
}

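/* In the clamping helpers below, xd->mb_to_left_edge and friends are
 * signed distances from this macroblock to the respective frame edges,
 * already expressed in 1/8-pel units; (19 << 3) is therefore a distance
 * of 19 pixels. */
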
static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
  /* If the MV points so far into the UMV border that no visible pixels
   * are used for reconstruction, the subpel part of the MV can be
   * discarded and the MV limited to 16 pixels with equivalent results.
   *
   * This limit kicks in at 19 pixels for the top and left edges, for
   * the 16 pixels plus 3 taps right of the central pixel when subpel
   * filtering. The bottom and right edges use 16 pixels plus 2 pixels
   * left of the central pixel when filtering.
   */
  if (mv->col < (xd->mb_to_left_edge - (19 << 3))) {
    mv->col = xd->mb_to_left_edge - (16 << 3);
  } else if (mv->col > xd->mb_to_right_edge + (18 << 3)) {
    mv->col = xd->mb_to_right_edge + (16 << 3);
  }

  if (mv->row < (xd->mb_to_top_edge - (19 << 3))) {
    mv->row = xd->mb_to_top_edge - (16 << 3);
  } else if (mv->row > xd->mb_to_bottom_edge + (18 << 3)) {
    mv->row = xd->mb_to_bottom_edge + (16 << 3);
  }
}

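/* Example: an mv->col more than 19 pixels past the left border reads
 * nothing but replicated border pixels, so clamping it to 16 pixels (and
 * implicitly dropping its sub-pel phase) reproduces the same prediction. */
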
/* A version of the above function for chroma block MVs. */
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
  mv->col = (2 * mv->col < (xd->mb_to_left_edge - (19 << 3)))
                ? (xd->mb_to_left_edge - (16 << 3)) >> 1
                : mv->col;
  mv->col = (2 * mv->col > xd->mb_to_right_edge + (18 << 3))
                ? (xd->mb_to_right_edge + (16 << 3)) >> 1
                : mv->col;

  mv->row = (2 * mv->row < (xd->mb_to_top_edge - (19 << 3)))
                ? (xd->mb_to_top_edge - (16 << 3)) >> 1
                : mv->row;
  mv->row = (2 * mv->row > xd->mb_to_bottom_edge + (18 << 3))
                ? (xd->mb_to_bottom_edge + (16 << 3)) >> 1
                : mv->row;
}

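/* Chroma MVs address half-resolution planes, so each component is
 * compared doubled (2 * mv) against the luma-scale limits and, when it
 * must be clamped, the luma-scale limit is halved (>> 1). */
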
void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, unsigned char *dst_y,
                                        unsigned char *dst_u,
                                        unsigned char *dst_v, int dst_ystride,
                                        int dst_uvstride) {
  int offset;
  unsigned char *ptr;
  unsigned char *uptr, *vptr;

  int_mv _16x16mv;

  unsigned char *ptr_base = x->pre.y_buffer;
  int pre_stride = x->pre.y_stride;

  _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;

  if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
    clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
  }

  ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride +
        (_16x16mv.as_mv.col >> 3);

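  /* int_mv overlays the two 16-bit row/col fields on a single 32-bit
   * as_int, so masking with 0x00070007 checks the sub-pel bits of both
   * components in one compare. The later pre_stride >>= 1 reuses the luma
   * stride for chroma, relying on the frame allocator making uv_stride
   * exactly half of y_stride. */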
  if (_16x16mv.as_int & 0x00070007) {
    x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7,
                             _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
  } else {
    vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
  }

  /* calc uv motion vectors */
  _16x16mv.as_mv.row +=
      1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1));
  _16x16mv.as_mv.col +=
      1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1));
  _16x16mv.as_mv.row /= 2;
  _16x16mv.as_mv.col /= 2;
  _16x16mv.as_mv.row &= x->fullpixel_mask;
  _16x16mv.as_mv.col &= x->fullpixel_mask;

  pre_stride >>= 1;
  offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
  uptr = x->pre.u_buffer + offset;
  vptr = x->pre.v_buffer + offset;

  if (_16x16mv.as_int & 0x00070007) {
    x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7,
                           _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
    x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7,
                           _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
  } else {
    vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
    vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
  }
}

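/* For build_inter4x4_predictors_mb below: mbmi.partitioning encodes the
 * SPLITMV layout. Values 0 (two 16x8), 1 (two 8x16) and 2 (four 8x8) all
 * leave each 8x8 quadrant with a single MV, so only blocks 0, 2, 8 and 10
 * (the quadrant top-left corners) are consulted and each quadrant is
 * predicted with one 8x8 call; value 3 (sixteen 4x4) walks all 16 luma
 * blocks in adjacent pairs. dst_stride is also passed as the pre stride
 * because, on this path, the reference and destination frame buffers are
 * allocated with the same stride. */
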
static void build_inter4x4_predictors_mb(MACROBLOCKD *x) {
  int i;
  unsigned char *base_dst = x->dst.y_buffer;
  unsigned char *base_pre = x->pre.y_buffer;

  if (x->mode_info_context->mbmi.partitioning < 3) {
    BLOCKD *b;
    int dst_stride = x->dst.y_stride;

    x->block[0].bmi = x->mode_info_context->bmi[0];
    x->block[2].bmi = x->mode_info_context->bmi[2];
    x->block[8].bmi = x->mode_info_context->bmi[8];
    x->block[10].bmi = x->mode_info_context->bmi[10];
    if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
      clamp_mv_to_umv_border(&x->block[0].bmi.mv.as_mv, x);
      clamp_mv_to_umv_border(&x->block[2].bmi.mv.as_mv, x);
      clamp_mv_to_umv_border(&x->block[8].bmi.mv.as_mv, x);
      clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
    }

    b = &x->block[0];
    build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
                             dst_stride);
    b = &x->block[2];
    build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
                             dst_stride);
    b = &x->block[8];
    build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
                             dst_stride);
    b = &x->block[10];
    build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre,
                             dst_stride);
  } else {
    for (i = 0; i < 16; i += 2) {
      BLOCKD *d0 = &x->block[i];
      BLOCKD *d1 = &x->block[i + 1];
      int dst_stride = x->dst.y_stride;

      x->block[i + 0].bmi = x->mode_info_context->bmi[i + 0];
      x->block[i + 1].bmi = x->mode_info_context->bmi[i + 1];
      if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
        clamp_mv_to_umv_border(&x->block[i + 0].bmi.mv.as_mv, x);
        clamp_mv_to_umv_border(&x->block[i + 1].bmi.mv.as_mv, x);
      }

      if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
        build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
                                 base_pre, dst_stride);
      } else {
        build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride,
                                 base_pre, dst_stride, x->subpixel_predict);
        build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride,
                                 base_pre, dst_stride, x->subpixel_predict);
      }
    }
  }
  base_dst = x->dst.u_buffer;
  base_pre = x->pre.u_buffer;
  for (i = 16; i < 20; i += 2) {
    BLOCKD *d0 = &x->block[i];
    BLOCKD *d1 = &x->block[i + 1];
    int dst_stride = x->dst.uv_stride;

    /* Note: uv mvs already clamped in build_4x4uvmvs() */

    if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
      build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
                               base_pre, dst_stride);
    } else {
      build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre,
                               dst_stride, x->subpixel_predict);
      build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre,
                               dst_stride, x->subpixel_predict);
    }
  }

  base_dst = x->dst.v_buffer;
  base_pre = x->pre.v_buffer;
  for (i = 20; i < 24; i += 2) {
    BLOCKD *d0 = &x->block[i];
    BLOCKD *d1 = &x->block[i + 1];
    int dst_stride = x->dst.uv_stride;

    /* Note: uv mvs already clamped in build_4x4uvmvs() */

    if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
      build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
                               base_pre, dst_stride);
    } else {
      build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre,
                               dst_stride, x->subpixel_predict);
      build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre,
                               dst_stride, x->subpixel_predict);
    }
  }
}

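/* build_4x4uvmvs mirrors the chroma-MV averaging of the encoder-only
 * vp8_build_inter4x4_predictors_mbuv above, but reads the luma MVs from
 * mode_info_context (as decoded from the bitstream), applies the chroma
 * border clamp, and copies each U block's MV to the co-located V block. */
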
static void build_4x4uvmvs(MACROBLOCKD *x) {
  int i, j;

  for (i = 0; i < 2; ++i) {
    for (j = 0; j < 2; ++j) {
      int yoffset = i * 8 + j * 2;
      int uoffset = 16 + i * 2 + j;
      int voffset = 20 + i * 2 + j;

      int temp;

      temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row +
             x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row +
             x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row +
             x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;

      temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

      x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

      temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col +
             x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col +
             x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col +
             x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;

      temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

      x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

      if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
        clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);
      }

      x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
    }
  }
}

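/* Top-level entry: a macroblock coded with a single MV takes the 16x16
 * path; a SPLITMV macroblock first derives its chroma MVs, then builds
 * the prediction block by block. */
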
void vp8_build_inter_predictors_mb(MACROBLOCKD *xd) {
  if (xd->mode_info_context->mbmi.mode != SPLITMV) {
    vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
                                       xd->dst.v_buffer, xd->dst.y_stride,
                                       xd->dst.uv_stride);
  } else {
    build_4x4uvmvs(xd);
    build_inter4x4_predictors_mb(xd);
  }
}

Generated by: LCOV version 1.13