LCOV - code coverage report
Current view: top level - third_party/aom/aom_dsp/x86 - loopfilter_avx2.c (source / functions) Hit Total Coverage
Test: output.info Lines: 0 569 0.0 %
Date: 2017-07-14 16:53:18 Functions: 0 2 0.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 2016, Alliance for Open Media. All rights reserved
       3             :  *
       4             :  * This source code is subject to the terms of the BSD 2 Clause License and
       5             :  * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
       6             :  * was not distributed with this source code in the LICENSE file, you can
       7             :  * obtain it at www.aomedia.org/license/software. If the Alliance for Open
       8             :  * Media Patent License 1.0 was not distributed with this source code in the
       9             :  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
      10             :  */
      11             : 
      12             : #include <immintrin.h> /* AVX2 */
      13             : 
      14             : #include "./aom_dsp_rtcd.h"
      15             : #include "aom_ports/mem.h"
      16             : 
/*
 * Wide (16-sample span) horizontal loop filter applied across an
 * 8-pixel-wide segment.
 *
 * s        points at the first row below the edge (q0); rows above the
 *          edge are p0..p7 at s - 1*p .. s - 8*p, rows below are
 *          q0..q7 at s + 0*p .. s + 7*p.
 * p        row stride in bytes.
 * _blimit, _limit, _thresh
 *          single-byte filter limits; each is broadcast to all lanes.
 *
 * Layout convention: each qXpX register packs the pX row (loaded from
 * s - (X+1)*p) in its LOW 64 bits and the qX row (loaded from s + X*p)
 * in its HIGH 64 bits, so one 128-bit operation filters both sides of
 * the edge at once.  pXqX registers are the same data with the halves
 * swapped.
 */
void aom_lpf_horizontal_edge_8_avx2(unsigned char *s, int p,
                                    const unsigned char *_blimit,
                                    const unsigned char *_limit,
                                    const unsigned char *_thresh) {
  __m128i mask, hev, flat, flat2;
  const __m128i zero = _mm_set1_epi16(0);
  const __m128i one = _mm_set1_epi8(1);
  __m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1;
  __m128i abs_p1p0;

  // Broadcast each scalar limit to every byte lane.
  const __m128i thresh =
      _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0]));
  const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0]));
  const __m128i blimit =
      _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0]));

  // Load p4..p0 into low halves, q4..q0 into high halves (see layout note).
  q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p));
  q4p4 = _mm_castps_si128(
      _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * p)));
  q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p));
  q3p3 = _mm_castps_si128(
      _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * p)));
  q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p));
  q2p2 = _mm_castps_si128(
      _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * p)));
  q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p));
  q1p1 = _mm_castps_si128(
      _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * p)));
  p1q1 = _mm_shuffle_epi32(q1p1, 78);  // 78 = 0b01001110: swap 64-bit halves
  q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p));
  q0p0 = _mm_castps_si128(
      _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * p)));
  p0q0 = _mm_shuffle_epi32(q0p0, 78);  // swap 64-bit halves

  {
    // Build the per-pixel filter mask and the high-edge-variance (hev) mask.
    __m128i abs_p1q1, abs_p0q0, abs_q1q0, fe, ff, work;
    // |a - b| via unsigned saturating subtract in both directions OR'd
    // together; low half holds |p1-p0|, high half |q1-q0|.
    abs_p1p0 =
        _mm_or_si128(_mm_subs_epu8(q1p1, q0p0), _mm_subs_epu8(q0p0, q1p1));
    abs_q1q0 = _mm_srli_si128(abs_p1p0, 8);
    fe = _mm_set1_epi8(0xfe);
    ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);  // all-ones constant
    abs_p0q0 =
        _mm_or_si128(_mm_subs_epu8(q0p0, p0q0), _mm_subs_epu8(p0q0, q0p0));
    abs_p1q1 =
        _mm_or_si128(_mm_subs_epu8(q1p1, p1q1), _mm_subs_epu8(p1q1, q1p1));
    flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
    // hev = max(|p1-p0|, |q1-q0|) > thresh  (0xff where true)
    hev = _mm_subs_epu8(flat, thresh);
    hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);

    abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);  // 2 * |p0-q0|, saturating
    // (|p1-q1| & 0xfe) >> 1 == |p1-q1| / 2 without cross-byte bleed.
    abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
    mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
    mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
    // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > blimit) * -1;
    mask = _mm_max_epu8(abs_p1p0, mask);
    // mask |= (abs(p1 - p0) > limit) * -1;
    // mask |= (abs(q1 - q0) > limit) * -1;

    // Fold in |p2-p1|, |p3-p2| (low half) and |q2-q1|, |q3-q2| (high half).
    work = _mm_max_epu8(
        _mm_or_si128(_mm_subs_epu8(q2p2, q1p1), _mm_subs_epu8(q1p1, q2p2)),
        _mm_or_si128(_mm_subs_epu8(q3p3, q2p2), _mm_subs_epu8(q2p2, q3p3)));
    mask = _mm_max_epu8(work, mask);
    // Merge the q-side (high half) maxima down into the p-side lanes, then
    // finalize: mask lane is 0xff iff every tracked difference <= limit.
    mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8));
    mask = _mm_subs_epu8(mask, limit);
    mask = _mm_cmpeq_epi8(mask, zero);
  }

  // lp filter
  {
    const __m128i t4 = _mm_set1_epi8(4);
    const __m128i t3 = _mm_set1_epi8(3);
    const __m128i t80 = _mm_set1_epi8(0x80);
    const __m128i t1 = _mm_set1_epi16(0x1);
    // XOR with 0x80 maps unsigned pixels into the signed byte domain.
    __m128i qs1ps1 = _mm_xor_si128(q1p1, t80);
    __m128i qs0ps0 = _mm_xor_si128(q0p0, t80);
    __m128i qs0 = _mm_xor_si128(p0q0, t80);
    __m128i qs1 = _mm_xor_si128(p1q1, t80);
    __m128i filt;
    __m128i work_a;
    __m128i filter1, filter2;
    __m128i flat2_q6p6, flat2_q5p5, flat2_q4p4, flat2_q3p3, flat2_q2p2;
    __m128i flat2_q1p1, flat2_q0p0, flat_q2p2, flat_q1p1, flat_q0p0;

    // Base 4-tap filter value: hev ? (ps1 - qs1) : 0, plus 3*(qs0 - ps0).
    filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev);
    work_a = _mm_subs_epi8(qs0, qs0ps0);
    filt = _mm_adds_epi8(filt, work_a);
    filt = _mm_adds_epi8(filt, work_a);
    filt = _mm_adds_epi8(filt, work_a);
    /* (aom_filter + 3 * (qs0 - ps0)) & mask */
    filt = _mm_and_si128(filt, mask);

    filter1 = _mm_adds_epi8(filt, t4);
    filter2 = _mm_adds_epi8(filt, t3);

    // Put each byte in the high byte of a 16-bit word, then arithmetic
    // shift by 11 (= 8 + 3): a sign-extended (filter >> 3) per lane.
    filter1 = _mm_unpacklo_epi8(zero, filter1);
    filter1 = _mm_srai_epi16(filter1, 0xB);
    filter2 = _mm_unpacklo_epi8(zero, filter2);
    filter2 = _mm_srai_epi16(filter2, 0xB);

    /* Filter1 >> 3 */
    // Pack {filter2 | -filter1} so one saturating add does
    // p0 += filter2 (low half) and q0 -= filter1 (high half).
    filt = _mm_packs_epi16(filter2, _mm_subs_epi16(zero, filter1));
    qs0ps0 = _mm_xor_si128(_mm_adds_epi8(qs0ps0, filt), t80);

    /* filt >> 1 */
    filt = _mm_adds_epi16(filter1, t1);
    filt = _mm_srai_epi16(filt, 1);
    // Zero the p1/q1 adjustment wherever hev is set (hev bytes are 0/0xff,
    // widened to full 16-bit masks via unpack + arithmetic shift by 8).
    filt = _mm_andnot_si128(_mm_srai_epi16(_mm_unpacklo_epi8(zero, hev), 0x8),
                            filt);
    filt = _mm_packs_epi16(filt, _mm_subs_epi16(zero, filt));
    qs1ps1 = _mm_xor_si128(_mm_adds_epi8(qs1ps1, filt), t80);
    // loopfilter done

    {
      __m128i work;
      // flat: 0xff where |p3..p1 - p0| and |q3..q1 - q0| are all <= 1
      // (the subs-by-one / cmpeq-zero pair implements the <= 1 test),
      // further gated by the base mask.
      flat = _mm_max_epu8(
          _mm_or_si128(_mm_subs_epu8(q2p2, q0p0), _mm_subs_epu8(q0p0, q2p2)),
          _mm_or_si128(_mm_subs_epu8(q3p3, q0p0), _mm_subs_epu8(q0p0, q3p3)));
      flat = _mm_max_epu8(abs_p1p0, flat);
      flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8));
      flat = _mm_subs_epu8(flat, one);
      flat = _mm_cmpeq_epi8(flat, zero);
      flat = _mm_and_si128(flat, mask);

      q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p));
      q5p5 = _mm_castps_si128(
          _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * p)));

      q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p));
      q6p6 = _mm_castps_si128(
          _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * p)));

      // flat2: extend the same <= 1 flatness test to p7..p4 / q7..q4.
      flat2 = _mm_max_epu8(
          _mm_or_si128(_mm_subs_epu8(q4p4, q0p0), _mm_subs_epu8(q0p0, q4p4)),
          _mm_or_si128(_mm_subs_epu8(q5p5, q0p0), _mm_subs_epu8(q0p0, q5p5)));

      q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p));
      q7p7 = _mm_castps_si128(
          _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * p)));

      work = _mm_max_epu8(
          _mm_or_si128(_mm_subs_epu8(q6p6, q0p0), _mm_subs_epu8(q0p0, q6p6)),
          _mm_or_si128(_mm_subs_epu8(q7p7, q0p0), _mm_subs_epu8(q0p0, q7p7)));

      flat2 = _mm_max_epu8(work, flat2);
      flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8));
      flat2 = _mm_subs_epu8(flat2, one);
      flat2 = _mm_cmpeq_epi8(flat2, zero);
      flat2 = _mm_and_si128(flat2, flat);  // flat2 & flat & mask
    }

    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // flat and wide flat calculations
    {
      const __m128i eight = _mm_set1_epi16(8);  // rounding for >> 4
      const __m128i four = _mm_set1_epi16(4);   // rounding for >> 3
      __m128i p7_16, p6_16, p5_16, p4_16, p3_16, p2_16, p1_16, p0_16;
      __m128i q7_16, q6_16, q5_16, q4_16, q3_16, q2_16, q1_16, q0_16;
      __m128i pixelFilter_p, pixelFilter_q;
      __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0;
      __m128i sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q;

      // Widen every sample to 16 bits so the running sums cannot overflow.
      p7_16 = _mm_unpacklo_epi8(q7p7, zero);
      p6_16 = _mm_unpacklo_epi8(q6p6, zero);
      p5_16 = _mm_unpacklo_epi8(q5p5, zero);
      p4_16 = _mm_unpacklo_epi8(q4p4, zero);
      p3_16 = _mm_unpacklo_epi8(q3p3, zero);
      p2_16 = _mm_unpacklo_epi8(q2p2, zero);
      p1_16 = _mm_unpacklo_epi8(q1p1, zero);
      p0_16 = _mm_unpacklo_epi8(q0p0, zero);
      q0_16 = _mm_unpackhi_epi8(q0p0, zero);
      q1_16 = _mm_unpackhi_epi8(q1p1, zero);
      q2_16 = _mm_unpackhi_epi8(q2p2, zero);
      q3_16 = _mm_unpackhi_epi8(q3p3, zero);
      q4_16 = _mm_unpackhi_epi8(q4p4, zero);
      q5_16 = _mm_unpackhi_epi8(q5p5, zero);
      q6_16 = _mm_unpackhi_epi8(q6p6, zero);
      q7_16 = _mm_unpackhi_epi8(q7p7, zero);

      // Wide-filter base sum: p6+..+p3 and q6+..+q3.
      pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6_16, p5_16),
                                    _mm_add_epi16(p4_16, p3_16));
      pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6_16, q5_16),
                                    _mm_add_epi16(q4_16, q3_16));

      pixetFilter_p2p1p0 = _mm_add_epi16(p0_16, _mm_add_epi16(p2_16, p1_16));
      pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);

      pixetFilter_q2q1q0 = _mm_add_epi16(q0_16, _mm_add_epi16(q2_16, q1_16));
      pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
      // pixelFilter_p now holds (p6+..+p0 + q6+..+q0 + 8) for the wide
      // (>> 4) filter; pixetFilter_p2p1p0 holds (p2+p1+p0+q2+q1+q0 + 4)
      // for the narrow (>> 3) filter.  Subsequent outputs are produced by
      // sliding these running sums: subtract the sample that leaves the
      // window, add (via sum_p7/sum_q7/sum_p3/sum_q3) the one that enters.
      pixelFilter_p =
          _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, pixelFilter_q));
      pixetFilter_p2p1p0 = _mm_add_epi16(
          four, _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0));
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7_16, p0_16)), 4);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7_16, q0_16)), 4);
      flat2_q0p0 = _mm_packus_epi16(res_p, res_q);
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(p3_16, p0_16)), 3);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(q3_16, q0_16)), 3);

      flat_q0p0 = _mm_packus_epi16(res_p, res_q);

      sum_p7 = _mm_add_epi16(p7_16, p7_16);
      sum_q7 = _mm_add_epi16(q7_16, q7_16);
      sum_p3 = _mm_add_epi16(p3_16, p3_16);
      sum_q3 = _mm_add_epi16(q3_16, q3_16);

      // Slide the wide window for p1/q1: drop p6 (resp. q6), add 2*p7.
      pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6_16);
      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6_16);
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1_16)), 4);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1_16)), 4);
      flat2_q1p1 = _mm_packus_epi16(res_p, res_q);

      // Slide the narrow window for p1/q1 likewise.
      pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2_16);
      pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2_16);
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p1_16)), 3);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q1_16)), 3);
      flat_q1p1 = _mm_packus_epi16(res_p, res_q);

      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
      sum_p3 = _mm_add_epi16(sum_p3, p3_16);
      sum_q3 = _mm_add_epi16(sum_q3, q3_16);

      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5_16);
      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5_16);
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2_16)), 4);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2_16)), 4);
      flat2_q2p2 = _mm_packus_epi16(res_p, res_q);

      pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1_16);
      pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1_16);

      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p2_16)), 3);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q2_16)), 3);
      flat_q2p2 = _mm_packus_epi16(res_p, res_q);

      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4_16);
      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4_16);
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3_16)), 4);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3_16)), 4);
      flat2_q3p3 = _mm_packus_epi16(res_p, res_q);

      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3_16);
      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3_16);
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4_16)), 4);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4_16)), 4);
      flat2_q4p4 = _mm_packus_epi16(res_p, res_q);

      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2_16);
      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2_16);
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5_16)), 4);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5_16)), 4);
      flat2_q5p5 = _mm_packus_epi16(res_p, res_q);

      sum_p7 = _mm_add_epi16(sum_p7, p7_16);
      sum_q7 = _mm_add_epi16(sum_q7, q7_16);
      pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1_16);
      pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1_16);
      res_p = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6_16)), 4);
      res_q = _mm_srli_epi16(
          _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6_16)), 4);
      flat2_q6p6 = _mm_packus_epi16(res_p, res_q);
    }
    // wide flat
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    // Broadcast the (already folded) low-half masks to both halves so they
    // select on the p side and the q side of each qXpX register.
    flat = _mm_shuffle_epi32(flat, 68);
    flat2 = _mm_shuffle_epi32(flat2, 68);

    // Blend per lane: flat ? narrow-flat result : original / 4-tap result;
    // then flat2 ? wide-flat result : previous; finally store both halves.
    q2p2 = _mm_andnot_si128(flat, q2p2);
    flat_q2p2 = _mm_and_si128(flat, flat_q2p2);
    q2p2 = _mm_or_si128(q2p2, flat_q2p2);

    qs1ps1 = _mm_andnot_si128(flat, qs1ps1);
    flat_q1p1 = _mm_and_si128(flat, flat_q1p1);
    q1p1 = _mm_or_si128(qs1ps1, flat_q1p1);

    qs0ps0 = _mm_andnot_si128(flat, qs0ps0);
    flat_q0p0 = _mm_and_si128(flat, flat_q0p0);
    q0p0 = _mm_or_si128(qs0ps0, flat_q0p0);

    q6p6 = _mm_andnot_si128(flat2, q6p6);
    flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6);
    q6p6 = _mm_or_si128(q6p6, flat2_q6p6);
    _mm_storel_epi64((__m128i *)(s - 7 * p), q6p6);
    _mm_storeh_pi((__m64 *)(s + 6 * p), _mm_castsi128_ps(q6p6));

    q5p5 = _mm_andnot_si128(flat2, q5p5);
    flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5);
    q5p5 = _mm_or_si128(q5p5, flat2_q5p5);
    _mm_storel_epi64((__m128i *)(s - 6 * p), q5p5);
    _mm_storeh_pi((__m64 *)(s + 5 * p), _mm_castsi128_ps(q5p5));

    q4p4 = _mm_andnot_si128(flat2, q4p4);
    flat2_q4p4 = _mm_and_si128(flat2, flat2_q4p4);
    q4p4 = _mm_or_si128(q4p4, flat2_q4p4);
    _mm_storel_epi64((__m128i *)(s - 5 * p), q4p4);
    _mm_storeh_pi((__m64 *)(s + 4 * p), _mm_castsi128_ps(q4p4));

    q3p3 = _mm_andnot_si128(flat2, q3p3);
    flat2_q3p3 = _mm_and_si128(flat2, flat2_q3p3);
    q3p3 = _mm_or_si128(q3p3, flat2_q3p3);
    _mm_storel_epi64((__m128i *)(s - 4 * p), q3p3);
    _mm_storeh_pi((__m64 *)(s + 3 * p), _mm_castsi128_ps(q3p3));

    q2p2 = _mm_andnot_si128(flat2, q2p2);
    flat2_q2p2 = _mm_and_si128(flat2, flat2_q2p2);
    q2p2 = _mm_or_si128(q2p2, flat2_q2p2);
    _mm_storel_epi64((__m128i *)(s - 3 * p), q2p2);
    _mm_storeh_pi((__m64 *)(s + 2 * p), _mm_castsi128_ps(q2p2));

    q1p1 = _mm_andnot_si128(flat2, q1p1);
    flat2_q1p1 = _mm_and_si128(flat2, flat2_q1p1);
    q1p1 = _mm_or_si128(q1p1, flat2_q1p1);
    _mm_storel_epi64((__m128i *)(s - 2 * p), q1p1);
    _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(q1p1));

    q0p0 = _mm_andnot_si128(flat2, q0p0);
    flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0);
    q0p0 = _mm_or_si128(q0p0, flat2_q0p0);
    _mm_storel_epi64((__m128i *)(s - 1 * p), q0p0);
    _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0));
  }
}
     365             : 
// Shuffle control for byte-to-word zero extension via (V)PSHUFB: each source
// byte index 0..15 is interleaved with 128 (0x80).  PSHUFB zeroes any lane
// whose control byte has the high bit set, so shuffling with this table turns
// 16 packed bytes into 16 zero-extended 16-bit words within each 128-bit lane.
// 32-byte aligned so it can be loaded as a full 256-bit vector.
DECLARE_ALIGNED(32, static const uint8_t, filt_loopfilter_avx2[32]) = {
  0, 128, 1, 128, 2,  128, 3,  128, 4,  128, 5,  128, 6,  128, 7,  128,
  8, 128, 9, 128, 10, 128, 11, 128, 12, 128, 13, 128, 14, 128, 15, 128
};
     370             : 
     371           0 : void aom_lpf_horizontal_edge_16_avx2(unsigned char *s, int p,
     372             :                                      const unsigned char *_blimit,
     373             :                                      const unsigned char *_limit,
     374             :                                      const unsigned char *_thresh) {
     375             :   __m128i mask, hev, flat, flat2;
     376           0 :   const __m128i zero = _mm_set1_epi16(0);
     377           0 :   const __m128i one = _mm_set1_epi8(1);
     378             :   __m128i p7, p6, p5;
     379             :   __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;
     380             :   __m128i q5, q6, q7;
     381             :   __m256i p256_7, q256_7, p256_6, q256_6, p256_5, q256_5, p256_4, q256_4,
     382             :       p256_3, q256_3, p256_2, q256_2, p256_1, q256_1, p256_0, q256_0;
     383             : 
     384           0 :   const __m128i thresh =
     385           0 :       _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0]));
     386           0 :   const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0]));
     387           0 :   const __m128i blimit =
     388           0 :       _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0]));
     389             : 
     390           0 :   p256_4 =
     391           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 5 * p)));
     392           0 :   p256_3 =
     393           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 4 * p)));
     394           0 :   p256_2 =
     395           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 3 * p)));
     396           0 :   p256_1 =
     397           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 2 * p)));
     398           0 :   p256_0 =
     399           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 1 * p)));
     400           0 :   q256_0 =
     401           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 0 * p)));
     402           0 :   q256_1 =
     403           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 1 * p)));
     404           0 :   q256_2 =
     405           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 2 * p)));
     406           0 :   q256_3 =
     407           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 3 * p)));
     408           0 :   q256_4 =
     409           0 :       _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 4 * p)));
     410             : 
     411           0 :   p4 = _mm256_castsi256_si128(p256_4);
     412           0 :   p3 = _mm256_castsi256_si128(p256_3);
     413           0 :   p2 = _mm256_castsi256_si128(p256_2);
     414           0 :   p1 = _mm256_castsi256_si128(p256_1);
     415           0 :   p0 = _mm256_castsi256_si128(p256_0);
     416           0 :   q0 = _mm256_castsi256_si128(q256_0);
     417           0 :   q1 = _mm256_castsi256_si128(q256_1);
     418           0 :   q2 = _mm256_castsi256_si128(q256_2);
     419           0 :   q3 = _mm256_castsi256_si128(q256_3);
     420           0 :   q4 = _mm256_castsi256_si128(q256_4);
     421             : 
     422             :   {
     423           0 :     const __m128i abs_p1p0 =
     424           0 :         _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1));
     425           0 :     const __m128i abs_q1q0 =
     426           0 :         _mm_or_si128(_mm_subs_epu8(q1, q0), _mm_subs_epu8(q0, q1));
     427           0 :     const __m128i fe = _mm_set1_epi8(0xfe);
     428           0 :     const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0);
     429           0 :     __m128i abs_p0q0 =
     430           0 :         _mm_or_si128(_mm_subs_epu8(p0, q0), _mm_subs_epu8(q0, p0));
     431           0 :     __m128i abs_p1q1 =
     432           0 :         _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1));
     433             :     __m128i work;
     434           0 :     flat = _mm_max_epu8(abs_p1p0, abs_q1q0);
     435           0 :     hev = _mm_subs_epu8(flat, thresh);
     436           0 :     hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff);
     437             : 
     438           0 :     abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);
     439           0 :     abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1);
     440           0 :     mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit);
     441           0 :     mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff);
     442             :     // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > blimit) * -1;
     443           0 :     mask = _mm_max_epu8(flat, mask);
     444             :     // mask |= (abs(p1 - p0) > limit) * -1;
     445             :     // mask |= (abs(q1 - q0) > limit) * -1;
     446           0 :     work = _mm_max_epu8(
     447             :         _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)),
     448             :         _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3)));
     449           0 :     mask = _mm_max_epu8(work, mask);
     450           0 :     work = _mm_max_epu8(
     451             :         _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)),
     452             :         _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
     453           0 :     mask = _mm_max_epu8(work, mask);
     454           0 :     mask = _mm_subs_epu8(mask, limit);
     455           0 :     mask = _mm_cmpeq_epi8(mask, zero);
     456             :   }
     457             : 
     458             :   // lp filter
     459             :   {
     460           0 :     const __m128i t4 = _mm_set1_epi8(4);
     461           0 :     const __m128i t3 = _mm_set1_epi8(3);
     462           0 :     const __m128i t80 = _mm_set1_epi8(0x80);
     463           0 :     const __m128i te0 = _mm_set1_epi8(0xe0);
     464           0 :     const __m128i t1f = _mm_set1_epi8(0x1f);
     465           0 :     const __m128i t1 = _mm_set1_epi8(0x1);
     466           0 :     const __m128i t7f = _mm_set1_epi8(0x7f);
     467             : 
     468           0 :     __m128i ps1 = _mm_xor_si128(p1, t80);
     469           0 :     __m128i ps0 = _mm_xor_si128(p0, t80);
     470           0 :     __m128i qs0 = _mm_xor_si128(q0, t80);
     471           0 :     __m128i qs1 = _mm_xor_si128(q1, t80);
     472             :     __m128i filt;
     473             :     __m128i work_a;
     474             :     __m128i filter1, filter2;
     475             :     __m128i flat2_p6, flat2_p5, flat2_p4, flat2_p3, flat2_p2, flat2_p1,
     476             :         flat2_p0, flat2_q0, flat2_q1, flat2_q2, flat2_q3, flat2_q4, flat2_q5,
     477             :         flat2_q6, flat_p2, flat_p1, flat_p0, flat_q0, flat_q1, flat_q2;
     478             : 
     479           0 :     filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev);
     480           0 :     work_a = _mm_subs_epi8(qs0, ps0);
     481           0 :     filt = _mm_adds_epi8(filt, work_a);
     482           0 :     filt = _mm_adds_epi8(filt, work_a);
     483           0 :     filt = _mm_adds_epi8(filt, work_a);
     484             :     /* (aom_filter + 3 * (qs0 - ps0)) & mask */
     485           0 :     filt = _mm_and_si128(filt, mask);
     486             : 
     487           0 :     filter1 = _mm_adds_epi8(filt, t4);
     488           0 :     filter2 = _mm_adds_epi8(filt, t3);
     489             : 
     490             :     /* Filter1 >> 3 */
     491           0 :     work_a = _mm_cmpgt_epi8(zero, filter1);
     492           0 :     filter1 = _mm_srli_epi16(filter1, 3);
     493           0 :     work_a = _mm_and_si128(work_a, te0);
     494           0 :     filter1 = _mm_and_si128(filter1, t1f);
     495           0 :     filter1 = _mm_or_si128(filter1, work_a);
     496           0 :     qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80);
     497             : 
     498             :     /* Filter2 >> 3 */
     499           0 :     work_a = _mm_cmpgt_epi8(zero, filter2);
     500           0 :     filter2 = _mm_srli_epi16(filter2, 3);
     501           0 :     work_a = _mm_and_si128(work_a, te0);
     502           0 :     filter2 = _mm_and_si128(filter2, t1f);
     503           0 :     filter2 = _mm_or_si128(filter2, work_a);
     504           0 :     ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80);
     505             : 
     506             :     /* filt >> 1 */
     507           0 :     filt = _mm_adds_epi8(filter1, t1);
     508           0 :     work_a = _mm_cmpgt_epi8(zero, filt);
     509           0 :     filt = _mm_srli_epi16(filt, 1);
     510           0 :     work_a = _mm_and_si128(work_a, t80);
     511           0 :     filt = _mm_and_si128(filt, t7f);
     512           0 :     filt = _mm_or_si128(filt, work_a);
     513           0 :     filt = _mm_andnot_si128(hev, filt);
     514           0 :     ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80);
     515           0 :     qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80);
     516             :     // loopfilter done
     517             : 
     518             :     {
     519             :       __m128i work;
     520           0 :       work = _mm_max_epu8(
     521             :           _mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)),
     522             :           _mm_or_si128(_mm_subs_epu8(q2, q0), _mm_subs_epu8(q0, q2)));
     523           0 :       flat = _mm_max_epu8(work, flat);
     524           0 :       work = _mm_max_epu8(
     525             :           _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)),
     526             :           _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3)));
     527           0 :       flat = _mm_max_epu8(work, flat);
     528           0 :       work = _mm_max_epu8(
     529             :           _mm_or_si128(_mm_subs_epu8(p4, p0), _mm_subs_epu8(p0, p4)),
     530             :           _mm_or_si128(_mm_subs_epu8(q4, q0), _mm_subs_epu8(q0, q4)));
     531           0 :       flat = _mm_subs_epu8(flat, one);
     532           0 :       flat = _mm_cmpeq_epi8(flat, zero);
     533           0 :       flat = _mm_and_si128(flat, mask);
     534             : 
     535           0 :       p256_5 = _mm256_castpd_si256(
     536           0 :           _mm256_broadcast_pd((__m128d const *)(s - 6 * p)));
     537           0 :       q256_5 = _mm256_castpd_si256(
     538           0 :           _mm256_broadcast_pd((__m128d const *)(s + 5 * p)));
     539           0 :       p5 = _mm256_castsi256_si128(p256_5);
     540           0 :       q5 = _mm256_castsi256_si128(q256_5);
     541           0 :       flat2 = _mm_max_epu8(
     542             :           _mm_or_si128(_mm_subs_epu8(p5, p0), _mm_subs_epu8(p0, p5)),
     543             :           _mm_or_si128(_mm_subs_epu8(q5, q0), _mm_subs_epu8(q0, q5)));
     544             : 
     545           0 :       flat2 = _mm_max_epu8(work, flat2);
     546           0 :       p256_6 = _mm256_castpd_si256(
     547           0 :           _mm256_broadcast_pd((__m128d const *)(s - 7 * p)));
     548           0 :       q256_6 = _mm256_castpd_si256(
     549           0 :           _mm256_broadcast_pd((__m128d const *)(s + 6 * p)));
     550           0 :       p6 = _mm256_castsi256_si128(p256_6);
     551           0 :       q6 = _mm256_castsi256_si128(q256_6);
     552           0 :       work = _mm_max_epu8(
     553             :           _mm_or_si128(_mm_subs_epu8(p6, p0), _mm_subs_epu8(p0, p6)),
     554             :           _mm_or_si128(_mm_subs_epu8(q6, q0), _mm_subs_epu8(q0, q6)));
     555             : 
     556           0 :       flat2 = _mm_max_epu8(work, flat2);
     557             : 
     558           0 :       p256_7 = _mm256_castpd_si256(
     559           0 :           _mm256_broadcast_pd((__m128d const *)(s - 8 * p)));
     560           0 :       q256_7 = _mm256_castpd_si256(
     561           0 :           _mm256_broadcast_pd((__m128d const *)(s + 7 * p)));
     562           0 :       p7 = _mm256_castsi256_si128(p256_7);
     563           0 :       q7 = _mm256_castsi256_si128(q256_7);
     564           0 :       work = _mm_max_epu8(
     565             :           _mm_or_si128(_mm_subs_epu8(p7, p0), _mm_subs_epu8(p0, p7)),
     566             :           _mm_or_si128(_mm_subs_epu8(q7, q0), _mm_subs_epu8(q0, q7)));
     567             : 
     568           0 :       flat2 = _mm_max_epu8(work, flat2);
     569           0 :       flat2 = _mm_subs_epu8(flat2, one);
     570           0 :       flat2 = _mm_cmpeq_epi8(flat2, zero);
     571           0 :       flat2 = _mm_and_si128(flat2, flat);  // flat2 & flat & mask
     572             :     }
     573             : 
     574             :     // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     575             :     // flat and wide flat calculations
     576             :     {
     577           0 :       const __m256i eight = _mm256_set1_epi16(8);
     578           0 :       const __m256i four = _mm256_set1_epi16(4);
     579             :       __m256i pixelFilter_p, pixelFilter_q, pixetFilter_p2p1p0,
     580             :           pixetFilter_q2q1q0, sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q;
     581             : 
     582           0 :       const __m256i filter =
     583             :           _mm256_load_si256((__m256i const *)filt_loopfilter_avx2);
     584           0 :       p256_7 = _mm256_shuffle_epi8(p256_7, filter);
     585           0 :       p256_6 = _mm256_shuffle_epi8(p256_6, filter);
     586           0 :       p256_5 = _mm256_shuffle_epi8(p256_5, filter);
     587           0 :       p256_4 = _mm256_shuffle_epi8(p256_4, filter);
     588           0 :       p256_3 = _mm256_shuffle_epi8(p256_3, filter);
     589           0 :       p256_2 = _mm256_shuffle_epi8(p256_2, filter);
     590           0 :       p256_1 = _mm256_shuffle_epi8(p256_1, filter);
     591           0 :       p256_0 = _mm256_shuffle_epi8(p256_0, filter);
     592           0 :       q256_0 = _mm256_shuffle_epi8(q256_0, filter);
     593           0 :       q256_1 = _mm256_shuffle_epi8(q256_1, filter);
     594           0 :       q256_2 = _mm256_shuffle_epi8(q256_2, filter);
     595           0 :       q256_3 = _mm256_shuffle_epi8(q256_3, filter);
     596           0 :       q256_4 = _mm256_shuffle_epi8(q256_4, filter);
     597           0 :       q256_5 = _mm256_shuffle_epi8(q256_5, filter);
     598           0 :       q256_6 = _mm256_shuffle_epi8(q256_6, filter);
     599           0 :       q256_7 = _mm256_shuffle_epi8(q256_7, filter);
     600             : 
     601           0 :       pixelFilter_p = _mm256_add_epi16(_mm256_add_epi16(p256_6, p256_5),
     602             :                                        _mm256_add_epi16(p256_4, p256_3));
     603           0 :       pixelFilter_q = _mm256_add_epi16(_mm256_add_epi16(q256_6, q256_5),
     604             :                                        _mm256_add_epi16(q256_4, q256_3));
     605             : 
     606           0 :       pixetFilter_p2p1p0 =
     607           0 :           _mm256_add_epi16(p256_0, _mm256_add_epi16(p256_2, p256_1));
     608           0 :       pixelFilter_p = _mm256_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
     609             : 
     610           0 :       pixetFilter_q2q1q0 =
     611           0 :           _mm256_add_epi16(q256_0, _mm256_add_epi16(q256_2, q256_1));
     612           0 :       pixelFilter_q = _mm256_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
     613             : 
     614           0 :       pixelFilter_p = _mm256_add_epi16(
     615             :           eight, _mm256_add_epi16(pixelFilter_p, pixelFilter_q));
     616             : 
     617           0 :       pixetFilter_p2p1p0 = _mm256_add_epi16(
     618             :           four, _mm256_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0));
     619             : 
     620           0 :       res_p = _mm256_srli_epi16(
     621             :           _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(p256_7, p256_0)), 4);
     622             : 
     623           0 :       flat2_p0 = _mm256_castsi256_si128(
     624           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     625             : 
     626           0 :       res_q = _mm256_srli_epi16(
     627             :           _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(q256_7, q256_0)), 4);
     628             : 
     629           0 :       flat2_q0 = _mm256_castsi256_si128(
     630           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     631             : 
     632           0 :       res_p =
     633           0 :           _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0,
     634             :                                              _mm256_add_epi16(p256_3, p256_0)),
     635             :                             3);
     636             : 
     637           0 :       flat_p0 = _mm256_castsi256_si128(
     638           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     639             : 
     640           0 :       res_q =
     641           0 :           _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0,
     642             :                                              _mm256_add_epi16(q256_3, q256_0)),
     643             :                             3);
     644             : 
     645           0 :       flat_q0 = _mm256_castsi256_si128(
     646           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     647             : 
     648           0 :       sum_p7 = _mm256_add_epi16(p256_7, p256_7);
     649             : 
     650           0 :       sum_q7 = _mm256_add_epi16(q256_7, q256_7);
     651             : 
     652           0 :       sum_p3 = _mm256_add_epi16(p256_3, p256_3);
     653             : 
     654           0 :       sum_q3 = _mm256_add_epi16(q256_3, q256_3);
     655             : 
     656           0 :       pixelFilter_q = _mm256_sub_epi16(pixelFilter_p, p256_6);
     657             : 
     658           0 :       pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_6);
     659             : 
     660           0 :       res_p = _mm256_srli_epi16(
     661             :           _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_1)), 4);
     662             : 
     663           0 :       flat2_p1 = _mm256_castsi256_si128(
     664           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     665             : 
     666           0 :       res_q = _mm256_srli_epi16(
     667             :           _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_1)), 4);
     668             : 
     669           0 :       flat2_q1 = _mm256_castsi256_si128(
     670           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     671             : 
     672           0 :       pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_p2p1p0, p256_2);
     673             : 
     674           0 :       pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_2);
     675             : 
     676           0 :       res_p =
     677           0 :           _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0,
     678             :                                              _mm256_add_epi16(sum_p3, p256_1)),
     679             :                             3);
     680             : 
     681           0 :       flat_p1 = _mm256_castsi256_si128(
     682           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     683             : 
     684           0 :       res_q =
     685           0 :           _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_q2q1q0,
     686             :                                              _mm256_add_epi16(sum_q3, q256_1)),
     687             :                             3);
     688             : 
     689           0 :       flat_q1 = _mm256_castsi256_si128(
     690           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     691             : 
     692           0 :       sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
     693             : 
     694           0 :       sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
     695             : 
     696           0 :       sum_p3 = _mm256_add_epi16(sum_p3, p256_3);
     697             : 
     698           0 :       sum_q3 = _mm256_add_epi16(sum_q3, q256_3);
     699             : 
     700           0 :       pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_5);
     701             : 
     702           0 :       pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_5);
     703             : 
     704           0 :       res_p = _mm256_srli_epi16(
     705             :           _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_2)), 4);
     706             : 
     707           0 :       flat2_p2 = _mm256_castsi256_si128(
     708           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     709             : 
     710           0 :       res_q = _mm256_srli_epi16(
     711             :           _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_2)), 4);
     712             : 
     713           0 :       flat2_q2 = _mm256_castsi256_si128(
     714           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     715             : 
     716           0 :       pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_1);
     717             : 
     718           0 :       pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_q2q1q0, p256_1);
     719             : 
     720           0 :       res_p =
     721           0 :           _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0,
     722             :                                              _mm256_add_epi16(sum_p3, p256_2)),
     723             :                             3);
     724             : 
     725           0 :       flat_p2 = _mm256_castsi256_si128(
     726           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     727             : 
     728           0 :       res_q =
     729           0 :           _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_q2q1q0,
     730             :                                              _mm256_add_epi16(sum_q3, q256_2)),
     731             :                             3);
     732             : 
     733           0 :       flat_q2 = _mm256_castsi256_si128(
     734           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     735             : 
     736           0 :       sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
     737             : 
     738           0 :       sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
     739             : 
     740           0 :       pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_4);
     741             : 
     742           0 :       pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_4);
     743             : 
     744           0 :       res_p = _mm256_srli_epi16(
     745             :           _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_3)), 4);
     746             : 
     747           0 :       flat2_p3 = _mm256_castsi256_si128(
     748           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     749             : 
     750           0 :       res_q = _mm256_srli_epi16(
     751             :           _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_3)), 4);
     752             : 
     753           0 :       flat2_q3 = _mm256_castsi256_si128(
     754           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     755             : 
     756           0 :       sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
     757             : 
     758           0 :       sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
     759             : 
     760           0 :       pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_3);
     761             : 
     762           0 :       pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_3);
     763             : 
     764           0 :       res_p = _mm256_srli_epi16(
     765             :           _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_4)), 4);
     766             : 
     767           0 :       flat2_p4 = _mm256_castsi256_si128(
     768           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     769             : 
     770           0 :       res_q = _mm256_srli_epi16(
     771             :           _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_4)), 4);
     772             : 
     773           0 :       flat2_q4 = _mm256_castsi256_si128(
     774           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     775             : 
     776           0 :       sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
     777             : 
     778           0 :       sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
     779             : 
     780           0 :       pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_2);
     781             : 
     782           0 :       pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_2);
     783             : 
     784           0 :       res_p = _mm256_srli_epi16(
     785             :           _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_5)), 4);
     786             : 
     787           0 :       flat2_p5 = _mm256_castsi256_si128(
     788           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     789             : 
     790           0 :       res_q = _mm256_srli_epi16(
     791             :           _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_5)), 4);
     792             : 
     793           0 :       flat2_q5 = _mm256_castsi256_si128(
     794           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     795             : 
     796           0 :       sum_p7 = _mm256_add_epi16(sum_p7, p256_7);
     797             : 
     798           0 :       sum_q7 = _mm256_add_epi16(sum_q7, q256_7);
     799             : 
     800           0 :       pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_1);
     801             : 
     802           0 :       pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_1);
     803             : 
     804           0 :       res_p = _mm256_srli_epi16(
     805             :           _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_6)), 4);
     806             : 
     807           0 :       flat2_p6 = _mm256_castsi256_si128(
     808           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168));
     809             : 
     810           0 :       res_q = _mm256_srli_epi16(
     811             :           _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_6)), 4);
     812             : 
     813           0 :       flat2_q6 = _mm256_castsi256_si128(
     814           0 :           _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168));
     815             :     }
     816             : 
     817             :     // wide flat
     818             :     // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     819             : 
     820           0 :     p2 = _mm_andnot_si128(flat, p2);
     821           0 :     flat_p2 = _mm_and_si128(flat, flat_p2);
     822           0 :     p2 = _mm_or_si128(flat_p2, p2);
     823             : 
     824           0 :     p1 = _mm_andnot_si128(flat, ps1);
     825           0 :     flat_p1 = _mm_and_si128(flat, flat_p1);
     826           0 :     p1 = _mm_or_si128(flat_p1, p1);
     827             : 
     828           0 :     p0 = _mm_andnot_si128(flat, ps0);
     829           0 :     flat_p0 = _mm_and_si128(flat, flat_p0);
     830           0 :     p0 = _mm_or_si128(flat_p0, p0);
     831             : 
     832           0 :     q0 = _mm_andnot_si128(flat, qs0);
     833           0 :     flat_q0 = _mm_and_si128(flat, flat_q0);
     834           0 :     q0 = _mm_or_si128(flat_q0, q0);
     835             : 
     836           0 :     q1 = _mm_andnot_si128(flat, qs1);
     837           0 :     flat_q1 = _mm_and_si128(flat, flat_q1);
     838           0 :     q1 = _mm_or_si128(flat_q1, q1);
     839             : 
     840           0 :     q2 = _mm_andnot_si128(flat, q2);
     841           0 :     flat_q2 = _mm_and_si128(flat, flat_q2);
     842           0 :     q2 = _mm_or_si128(flat_q2, q2);
     843             : 
     844           0 :     p6 = _mm_andnot_si128(flat2, p6);
     845           0 :     flat2_p6 = _mm_and_si128(flat2, flat2_p6);
     846           0 :     p6 = _mm_or_si128(flat2_p6, p6);
     847           0 :     _mm_storeu_si128((__m128i *)(s - 7 * p), p6);
     848             : 
     849           0 :     p5 = _mm_andnot_si128(flat2, p5);
     850           0 :     flat2_p5 = _mm_and_si128(flat2, flat2_p5);
     851           0 :     p5 = _mm_or_si128(flat2_p5, p5);
     852           0 :     _mm_storeu_si128((__m128i *)(s - 6 * p), p5);
     853             : 
     854           0 :     p4 = _mm_andnot_si128(flat2, p4);
     855           0 :     flat2_p4 = _mm_and_si128(flat2, flat2_p4);
     856           0 :     p4 = _mm_or_si128(flat2_p4, p4);
     857           0 :     _mm_storeu_si128((__m128i *)(s - 5 * p), p4);
     858             : 
     859           0 :     p3 = _mm_andnot_si128(flat2, p3);
     860           0 :     flat2_p3 = _mm_and_si128(flat2, flat2_p3);
     861           0 :     p3 = _mm_or_si128(flat2_p3, p3);
     862           0 :     _mm_storeu_si128((__m128i *)(s - 4 * p), p3);
     863             : 
     864           0 :     p2 = _mm_andnot_si128(flat2, p2);
     865           0 :     flat2_p2 = _mm_and_si128(flat2, flat2_p2);
     866           0 :     p2 = _mm_or_si128(flat2_p2, p2);
     867           0 :     _mm_storeu_si128((__m128i *)(s - 3 * p), p2);
     868             : 
     869           0 :     p1 = _mm_andnot_si128(flat2, p1);
     870           0 :     flat2_p1 = _mm_and_si128(flat2, flat2_p1);
     871           0 :     p1 = _mm_or_si128(flat2_p1, p1);
     872           0 :     _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
     873             : 
     874           0 :     p0 = _mm_andnot_si128(flat2, p0);
     875           0 :     flat2_p0 = _mm_and_si128(flat2, flat2_p0);
     876           0 :     p0 = _mm_or_si128(flat2_p0, p0);
     877           0 :     _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
     878             : 
     879           0 :     q0 = _mm_andnot_si128(flat2, q0);
     880           0 :     flat2_q0 = _mm_and_si128(flat2, flat2_q0);
     881           0 :     q0 = _mm_or_si128(flat2_q0, q0);
     882             :     _mm_storeu_si128((__m128i *)(s - 0 * p), q0);
     883             : 
     884           0 :     q1 = _mm_andnot_si128(flat2, q1);
     885           0 :     flat2_q1 = _mm_and_si128(flat2, flat2_q1);
     886           0 :     q1 = _mm_or_si128(flat2_q1, q1);
     887           0 :     _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
     888             : 
     889           0 :     q2 = _mm_andnot_si128(flat2, q2);
     890           0 :     flat2_q2 = _mm_and_si128(flat2, flat2_q2);
     891           0 :     q2 = _mm_or_si128(flat2_q2, q2);
     892           0 :     _mm_storeu_si128((__m128i *)(s + 2 * p), q2);
     893             : 
     894           0 :     q3 = _mm_andnot_si128(flat2, q3);
     895           0 :     flat2_q3 = _mm_and_si128(flat2, flat2_q3);
     896           0 :     q3 = _mm_or_si128(flat2_q3, q3);
     897           0 :     _mm_storeu_si128((__m128i *)(s + 3 * p), q3);
     898             : 
     899           0 :     q4 = _mm_andnot_si128(flat2, q4);
     900           0 :     flat2_q4 = _mm_and_si128(flat2, flat2_q4);
     901           0 :     q4 = _mm_or_si128(flat2_q4, q4);
     902           0 :     _mm_storeu_si128((__m128i *)(s + 4 * p), q4);
     903             : 
     904           0 :     q5 = _mm_andnot_si128(flat2, q5);
     905           0 :     flat2_q5 = _mm_and_si128(flat2, flat2_q5);
     906           0 :     q5 = _mm_or_si128(flat2_q5, q5);
     907           0 :     _mm_storeu_si128((__m128i *)(s + 5 * p), q5);
     908             : 
     909           0 :     q6 = _mm_andnot_si128(flat2, q6);
     910           0 :     flat2_q6 = _mm_and_si128(flat2, flat2_q6);
     911           0 :     q6 = _mm_or_si128(flat2_q6, q6);
     912           0 :     _mm_storeu_si128((__m128i *)(s + 6 * p), q6);
     913             :   }
     914             :   _mm256_zeroupper();
     915           0 : }

Generated by: LCOV version 1.13