/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <emmintrin.h>
#include <stdio.h>

#include "aom_dsp/x86/synonyms.h"

#include "./aom_dsp_rtcd.h"

static uint64_t aom_sum_squares_2d_i16_4x4_sse2(const int16_t *src,
                                                int stride) {
  // Load each 4-sample row into the low 64 bits of a register; the upper
  // 64 bits are zeroed by _mm_loadl_epi64.
  const __m128i v_val_0_w =
      _mm_loadl_epi64((const __m128i *)(src + 0 * stride));
  const __m128i v_val_1_w =
      _mm_loadl_epi64((const __m128i *)(src + 1 * stride));
  const __m128i v_val_2_w =
      _mm_loadl_epi64((const __m128i *)(src + 2 * stride));
  const __m128i v_val_3_w =
      _mm_loadl_epi64((const __m128i *)(src + 3 * stride));

  // madd(x, x) squares each 16-bit sample and sums adjacent pairs, leaving
  // two 32-bit partial sums per row in the low two lanes.
  const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
  const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
  const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
  const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);

  const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
  const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
  const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);

  // Fold the two remaining partial sums into lane 0 and extract it.
  const __m128i v_sum_d =
      _mm_add_epi32(v_sum_0123_d, _mm_srli_epi64(v_sum_0123_d, 32));

  return (uint64_t)_mm_cvtsi128_si32(v_sum_d);
}
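
/* For reference, a scalar model of what the kernel above computes. This is
 * an illustrative sketch (not part of libaom, kept out of the build): it
 * matches the SIMD path as long as the running sum stays within the int32
 * range that the (uint64_t)_mm_cvtsi128_si32() return above preserves.
 */
#if 0
static uint64_t sum_squares_2d_i16_4x4_model(const int16_t *src, int stride) {
  uint32_t ss = 0;
  int r, c;
  for (r = 0; r < 4; ++r)
    for (c = 0; c < 4; ++c) {
      const int v = src[r * stride + c];
      ss += (uint32_t)(v * v);  // one term of the madd/add reduction tree
    }
  return (uint64_t)ss;
}
#endif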

#ifdef __GNUC__
// This prevents GCC/Clang from inlining this function into
// aom_sum_squares_2d_i16_sse2, which in turn saves some stack
// maintenance instructions in the common case of 4x4.
__attribute__((noinline))
#endif
static uint64_t
aom_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int width,
                                int height) {
  int r, c;

  // Mask that keeps only the low 32 bits of each 64-bit lane, used to
  // zero-extend 32-bit partial sums into 64-bit accumulation.
  const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
  __m128i v_acc_q = _mm_setzero_si128();

  for (r = 0; r < height; r += 8) {
    // 32-bit accumulator for one 8-row strip; widened to 64 bits below.
    __m128i v_acc_d = _mm_setzero_si128();

    for (c = 0; c < width; c += 8) {
      const int16_t *b = src + c;

      // Aligned loads: rows are expected to be 16-byte aligned.
      const __m128i v_val_0_w =
          _mm_load_si128((const __m128i *)(b + 0 * stride));
      const __m128i v_val_1_w =
          _mm_load_si128((const __m128i *)(b + 1 * stride));
      const __m128i v_val_2_w =
          _mm_load_si128((const __m128i *)(b + 2 * stride));
      const __m128i v_val_3_w =
          _mm_load_si128((const __m128i *)(b + 3 * stride));
      const __m128i v_val_4_w =
          _mm_load_si128((const __m128i *)(b + 4 * stride));
      const __m128i v_val_5_w =
          _mm_load_si128((const __m128i *)(b + 5 * stride));
      const __m128i v_val_6_w =
          _mm_load_si128((const __m128i *)(b + 6 * stride));
      const __m128i v_val_7_w =
          _mm_load_si128((const __m128i *)(b + 7 * stride));

      const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
      const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
      const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
      const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);
      const __m128i v_sq_4_d = _mm_madd_epi16(v_val_4_w, v_val_4_w);
      const __m128i v_sq_5_d = _mm_madd_epi16(v_val_5_w, v_val_5_w);
      const __m128i v_sq_6_d = _mm_madd_epi16(v_val_6_w, v_val_6_w);
      const __m128i v_sq_7_d = _mm_madd_epi16(v_val_7_w, v_val_7_w);

      const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
      const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
      const __m128i v_sum_45_d = _mm_add_epi32(v_sq_4_d, v_sq_5_d);
      const __m128i v_sum_67_d = _mm_add_epi32(v_sq_6_d, v_sq_7_d);

      const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);
      const __m128i v_sum_4567_d = _mm_add_epi32(v_sum_45_d, v_sum_67_d);

      v_acc_d = _mm_add_epi32(v_acc_d, v_sum_0123_d);
      v_acc_d = _mm_add_epi32(v_acc_d, v_sum_4567_d);
    }

    // Zero-extend the four 32-bit lane sums into the 64-bit accumulator:
    // AND keeps the low lane of each pair, the shift recovers the high one.
    v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_acc_d, v_zext_mask_q));
    v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_acc_d, 32));

    src += 8 * stride;
  }

  // Add the upper 64-bit lane onto the lower one.
  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));

#if ARCH_X86_64
  return (uint64_t)_mm_cvtsi128_si64(v_acc_q);
#else
  {
    uint64_t tmp;
    _mm_storel_epi64((__m128i *)&tmp, v_acc_q);
    return tmp;
  }
#endif
}
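
/* The v_zext_mask_q step above widens four wrapping 32-bit lane sums into
 * 64-bit accumulation: the AND keeps the low 32-bit half of each 64-bit
 * lane zero-extended, and the logical right shift recovers the high half.
 * A scalar model of the same trick on one 64-bit lane (illustrative sketch,
 * not part of libaom):
 */
#if 0
static uint64_t widen_add_model(uint64_t acc_q, uint64_t acc_d) {
  /* acc_d packs two 32-bit partial sums the way one 64-bit lane of
   * v_acc_d does. */
  acc_q += acc_d & 0xffffffffULL; /* _mm_and_si128(v_acc_d, v_zext_mask_q) */
  acc_q += acc_d >> 32;           /* _mm_srli_epi64(v_acc_d, 32) */
  return acc_q;
}
#endif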

uint64_t aom_sum_squares_2d_i16_sse2(const int16_t *src, int stride, int width,
                                     int height) {
  // A 4-element row fills only half an XMM register, so 4x4 needs its own
  // special case; it is also the common case, accounting for over 75% of
  // all calls.
  if (LIKELY(width == 4 && height == 4)) {
    return aom_sum_squares_2d_i16_4x4_sse2(src, stride);
  } else if (LIKELY(width % 8 == 0 && height % 8 == 0)) {
    // Generic case: both dimensions are multiples of 8.
    return aom_sum_squares_2d_i16_nxn_sse2(src, stride, width, height);
  } else {
    // Anything else falls back to the scalar C implementation.
    return aom_sum_squares_2d_i16_c(src, stride, width, height);
  }
}
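
/* Usage sketch for the dispatcher above (illustrative, not part of libaom;
 * the buffer name and dimensions are hypothetical). Because the nxn path
 * uses aligned loads, rows must stay 16-byte aligned; this sketch assumes
 * the DECLARE_ALIGNED macro from aom_ports/mem.h for the base address, and
 * a stride whose byte size is a multiple of 16.
 */
#if 0
static void sum_squares_usage_example(void) {
  DECLARE_ALIGNED(16, int16_t, diff[32 * 32]);
  /* ... fill diff with prediction residuals ... */
  const uint64_t ss_4x4 = aom_sum_squares_2d_i16_sse2(diff, 32, 4, 4);
  const uint64_t ss_32x32 = aom_sum_squares_2d_i16_sse2(diff, 32, 32, 32);
  printf("4x4: %llu, 32x32: %llu\n", (unsigned long long)ss_4x4,
         (unsigned long long)ss_32x32);
}
#endif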

//////////////////////////////////////////////////////////////////////////////
// 1D version
//////////////////////////////////////////////////////////////////////////////

static uint64_t aom_sum_squares_i16_64n_sse2(const int16_t *src, uint32_t n) {
  const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
  // Two independent 64-bit accumulators keep the widening adds off a
  // single dependency chain.
  __m128i v_acc0_q = _mm_setzero_si128();
  __m128i v_acc1_q = _mm_setzero_si128();

  const int16_t *const end = src + n;

  assert(n % 64 == 0);

  while (src < end) {
    // Process 64 samples (eight registers of eight int16s) per iteration.
    const __m128i v_val_0_w = xx_load_128(src);
    const __m128i v_val_1_w = xx_load_128(src + 8);
    const __m128i v_val_2_w = xx_load_128(src + 16);
    const __m128i v_val_3_w = xx_load_128(src + 24);
    const __m128i v_val_4_w = xx_load_128(src + 32);
    const __m128i v_val_5_w = xx_load_128(src + 40);
    const __m128i v_val_6_w = xx_load_128(src + 48);
    const __m128i v_val_7_w = xx_load_128(src + 56);

    const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
    const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
    const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
    const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);
    const __m128i v_sq_4_d = _mm_madd_epi16(v_val_4_w, v_val_4_w);
    const __m128i v_sq_5_d = _mm_madd_epi16(v_val_5_w, v_val_5_w);
    const __m128i v_sq_6_d = _mm_madd_epi16(v_val_6_w, v_val_6_w);
    const __m128i v_sq_7_d = _mm_madd_epi16(v_val_7_w, v_val_7_w);

    const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
    const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
    const __m128i v_sum_45_d = _mm_add_epi32(v_sq_4_d, v_sq_5_d);
    const __m128i v_sum_67_d = _mm_add_epi32(v_sq_6_d, v_sq_7_d);

    const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);
    const __m128i v_sum_4567_d = _mm_add_epi32(v_sum_45_d, v_sum_67_d);

    const __m128i v_sum_d = _mm_add_epi32(v_sum_0123_d, v_sum_4567_d);

    // Widen the four 32-bit lane sums to 64 bits before they can overflow.
    v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_and_si128(v_sum_d, v_zext_mask_q));
    v_acc1_q = _mm_add_epi64(v_acc1_q, _mm_srli_epi64(v_sum_d, 32));

    src += 64;
  }

  // Combine the two accumulators, then fold the upper lane onto the lower.
  v_acc0_q = _mm_add_epi64(v_acc0_q, v_acc1_q);
  v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_srli_si128(v_acc0_q, 8));

#if ARCH_X86_64
  return (uint64_t)_mm_cvtsi128_si64(v_acc0_q);
#else
  {
    uint64_t tmp;
    _mm_storel_epi64((__m128i *)&tmp, v_acc0_q);
    return tmp;
  }
#endif
}
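
/* Scalar model of the epilogue fold above (illustrative sketch, not part of
 * libaom), with lo/hi standing for the two 64-bit lanes of each register:
 */
#if 0
static uint64_t fold_model(uint64_t acc0_lo, uint64_t acc0_hi,
                           uint64_t acc1_lo, uint64_t acc1_hi) {
  /* _mm_add_epi64(v_acc0_q, v_acc1_q) adds lane-wise. */
  const uint64_t lo = acc0_lo + acc1_lo;
  const uint64_t hi = acc0_hi + acc1_hi;
  /* _mm_srli_si128(..., 8) moves the high lane down; the final
   * _mm_add_epi64 leaves lo + hi in lane 0, which is what gets returned. */
  return lo + hi;
}
#endif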

uint64_t aom_sum_squares_i16_sse2(const int16_t *src, uint32_t n) {
  if (n % 64 == 0) {
    return aom_sum_squares_i16_64n_sse2(src, n);
  } else if (n > 64) {
    // Round n down to a multiple of 64; the scalar C version handles the
    // remaining tail.
    const uint32_t k = n & ~(64 - 1);
    return aom_sum_squares_i16_64n_sse2(src, k) +
           aom_sum_squares_i16_c(src + k, n - k);
  } else {
    return aom_sum_squares_i16_c(src, n);
  }
}
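
/* Model of the n > 64 branch above (illustrative sketch, not part of
 * libaom): the SIMD kernel covers the 64-aligned prefix [0, k) and a scalar
 * loop, standing in for aom_sum_squares_i16_c, covers the tail [k, n).
 */
#if 0
static uint64_t sum_squares_i16_split_model(const int16_t *src, uint32_t n) {
  const uint32_t k = n & ~(uint32_t)(64 - 1); /* n rounded down, k >= 64 */
  uint64_t ss = aom_sum_squares_i16_64n_sse2(src, k);
  uint32_t i;
  for (i = k; i < n; ++i) ss += (uint64_t)((int32_t)src[i] * src[i]);
  return ss;
}
#endif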
