/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <emmintrin.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"

static uint64_t vpx_sum_squares_2d_i16_4x4_sse2(const int16_t *src,
                                                int stride) {
  // Load each 4-element row into the low 64 bits of a register; the
  // upper half of each register is zeroed by the 64-bit load.
  const __m128i v_val_0_w =
      _mm_loadl_epi64((const __m128i *)(src + 0 * stride));
  const __m128i v_val_1_w =
      _mm_loadl_epi64((const __m128i *)(src + 1 * stride));
  const __m128i v_val_2_w =
      _mm_loadl_epi64((const __m128i *)(src + 2 * stride));
  const __m128i v_val_3_w =
      _mm_loadl_epi64((const __m128i *)(src + 3 * stride));

  // madd(x, x) squares the 16-bit values and sums adjacent pairs into
  // 32-bit lanes.
  const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
  const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
  const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
  const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);

  const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
  const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
  const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);

  // Only the two low 32-bit lanes are non-zero; fold them together.
  const __m128i v_sum_d =
      _mm_add_epi32(v_sum_0123_d, _mm_srli_epi64(v_sum_0123_d, 32));

  return (uint64_t)_mm_cvtsi128_si32(v_sum_d);
}
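
// For reference, a minimal scalar sketch of what the SIMD kernels in this
// file compute; the function name is illustrative and not a libvpx symbol.
// It can serve as a baseline when checking the vector paths against known
// inputs.
static uint64_t sum_squares_2d_i16_ref(const int16_t *src, int stride,
                                       int size) {
  uint64_t ss = 0;
  int r, c;
  for (r = 0; r < size; ++r) {
    for (c = 0; c < size; ++c) {
      const int16_t v = src[r * stride + c];
      ss += (int64_t)v * v;  // Widen before accumulating; v*v <= 2^30.
    }
  }
  return ss;
}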

// TODO(jingning): Evaluate the performance impact here.
#ifdef __GNUC__
// This prevents GCC/Clang from inlining this function into
// vpx_sum_squares_2d_i16_sse2, which in turn saves some stack
// maintenance instructions in the common case of 4x4.
__attribute__((noinline))
#endif
static uint64_t
vpx_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int size) {
  int r, c;
  // Mask that keeps only the low 32 bits of each 64-bit lane, used to
  // zero-extend 32-bit partial sums into the 64-bit accumulator.
  const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
  __m128i v_acc_q = _mm_setzero_si128();

  // Walk the block in 8x8 tiles, accumulating in 32-bit lanes within each
  // strip of 8 rows and widening to 64-bit lanes between strips.
  for (r = 0; r < size; r += 8) {
    __m128i v_acc_d = _mm_setzero_si128();

    for (c = 0; c < size; c += 8) {
      const int16_t *b = src + c;
      // Aligned loads: each row of the tile must be 16-byte aligned.
      const __m128i v_val_0_w =
          _mm_load_si128((const __m128i *)(b + 0 * stride));
      const __m128i v_val_1_w =
          _mm_load_si128((const __m128i *)(b + 1 * stride));
      const __m128i v_val_2_w =
          _mm_load_si128((const __m128i *)(b + 2 * stride));
      const __m128i v_val_3_w =
          _mm_load_si128((const __m128i *)(b + 3 * stride));
      const __m128i v_val_4_w =
          _mm_load_si128((const __m128i *)(b + 4 * stride));
      const __m128i v_val_5_w =
          _mm_load_si128((const __m128i *)(b + 5 * stride));
      const __m128i v_val_6_w =
          _mm_load_si128((const __m128i *)(b + 6 * stride));
      const __m128i v_val_7_w =
          _mm_load_si128((const __m128i *)(b + 7 * stride));

      // madd(x, x) squares the 16-bit values and sums adjacent pairs
      // into 32-bit lanes.
      const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
      const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
      const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
      const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);
      const __m128i v_sq_4_d = _mm_madd_epi16(v_val_4_w, v_val_4_w);
      const __m128i v_sq_5_d = _mm_madd_epi16(v_val_5_w, v_val_5_w);
      const __m128i v_sq_6_d = _mm_madd_epi16(v_val_6_w, v_val_6_w);
      const __m128i v_sq_7_d = _mm_madd_epi16(v_val_7_w, v_val_7_w);

      // Reduction tree over the eight rows of the tile.
      const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
      const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
      const __m128i v_sum_45_d = _mm_add_epi32(v_sq_4_d, v_sq_5_d);
      const __m128i v_sum_67_d = _mm_add_epi32(v_sq_6_d, v_sq_7_d);

      const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);
      const __m128i v_sum_4567_d = _mm_add_epi32(v_sum_45_d, v_sum_67_d);

      v_acc_d = _mm_add_epi32(v_acc_d, v_sum_0123_d);
      v_acc_d = _mm_add_epi32(v_acc_d, v_sum_4567_d);
    }

    // Widen the strip's 32-bit sums into the 64-bit accumulator: low
    // halves via the zero-extend mask, high halves via a 32-bit shift.
    v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_acc_d, v_zext_mask_q));
    v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_acc_d, 32));

    src += 8 * stride;
  }

  // Fold the two 64-bit lanes into one.
  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));

#if ARCH_X86_64
  return (uint64_t)_mm_cvtsi128_si64(v_acc_q);
#else
  // _mm_cvtsi128_si64 is unavailable on 32-bit targets; spill the low
  // 64 bits through memory instead.
  {
    uint64_t tmp;
    _mm_storel_epi64((__m128i *)&tmp, v_acc_q);
    return tmp;
  }
#endif
}

uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t *src, int stride,
                                     int size) {
  // 4 elements per row only requires half an XMM register, so this
  // must be a special case, but also note that over 75% of all calls
  // are with size == 4, so it is also the common case.
  if (size == 4) {
    return vpx_sum_squares_2d_i16_4x4_sse2(src, stride);
  } else {
    // Generic case: the tiled kernel steps by 8 in both dimensions,
    // so size must be a multiple of 8.
    assert(size % 8 == 0);
    return vpx_sum_squares_2d_i16_nxn_sse2(src, stride, size);
  }
}
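
// A minimal usage sketch, not part of libvpx: the helper name and the
// GCC-style alignment attribute are illustrative. Because the nxn kernel
// uses aligned loads (_mm_load_si128), the buffer and the stride (in
// elements) must keep every row 16-byte aligned; the 4x4 path uses
// unaligned 64-bit loads and has no such requirement.
static void example_sum_squares_usage(void) {
  __attribute__((aligned(16))) int16_t block[8 * 8] = { 0 };
  uint64_t ss;
  block[0] = 3;
  block[1] = -4;
  // Expected result: 3*3 + (-4)*(-4) == 25.
  ss = vpx_sum_squares_2d_i16_sse2(block, 8, 8);
  (void)ss;
}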