Line data Source code
1 : /*
2 : * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
3 : *
4 : * Use of this source code is governed by a BSD-style license
5 : * that can be found in the LICENSE file in the root of the source
6 : * tree. An additional intellectual property rights grant can be found
7 : * in the file PATENTS. All contributing project authors may
8 : * be found in the AUTHORS file in the root of the source tree.
9 : */
10 :
11 : #include <emmintrin.h> // SSE2
12 :
13 : #include "./vpx_dsp_rtcd.h"
14 : #include "vpx_dsp/txfm_common.h"
15 : #include "vpx_dsp/x86/fwd_txfm_sse2.h"
16 : #include "vpx_dsp/x86/txfm_common_sse2.h"
17 : #include "vpx_ports/mem.h"
18 :
19 : // TODO(jingning) The high bit-depth functions need rework for performance.
20 : // After we properly fix the high bit-depth function implementations, this
21 : // file's dependency should be substantially simplified.
22 : #if DCT_HIGH_BIT_DEPTH
23 : #define ADD_EPI16 _mm_adds_epi16
24 : #define SUB_EPI16 _mm_subs_epi16
25 :
26 : #else
27 : #define ADD_EPI16 _mm_add_epi16
28 : #define SUB_EPI16 _mm_sub_epi16
29 : #endif
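// Note: this file is an implementation template, not a standalone translation
// unit. FDCT4x4_2D, FDCT8x8_2D, FDCT16x16_2D and DCT_HIGH_BIT_DEPTH are
// expected to be #defined by the file that includes it. A minimal sketch of
// such an includer (macro values and the include path are illustrative):
//   #define DCT_HIGH_BIT_DEPTH 0
//   #define FDCT4x4_2D vpx_fdct4x4_sse2
//   #define FDCT8x8_2D vpx_fdct8x8_sse2
//   #define FDCT16x16_2D vpx_fdct16x16_sse2
//   #include "fwd_txfm_impl_sse2.h"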
30 :
31 0 : void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) {
32 : // This 2D transform implements 4 vertical 1D transforms followed
33 : // by 4 horizontal 1D transforms. The multiplies and adds are as given
34 : // by Chen, Smith and Fralick ('77). The commands for moving the data
35 : // around have been minimized by hand.
36 : // For the purposes of the comments, the 16 inputs are referred to as i0
37 : // through iF (in raster order); the intermediate variables a0, b0, c0
38 : // through aF, bF, cF correspond to the in-place computations mapped to the
39 : // input locations. The outputs, o0 through oF, are labeled according to
40 : // the output locations.
41 :
42 : // Constants
43 : // These are the coefficients used for the multiplies.
44 : // In the comments, pN means cos(N pi / 64) and mN means -cos(N pi / 64),
45 : // where cospi_N_64 is the fixed-point representation of cos(N pi / 64).
46 0 : const __m128i k__cospi_A =
47 0 : octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
48 : cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
49 0 : const __m128i k__cospi_B =
50 0 : octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
51 : cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);
52 0 : const __m128i k__cospi_C =
53 0 : octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
54 : cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64);
55 0 : const __m128i k__cospi_D =
56 0 : octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
57 : cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64);
58 0 : const __m128i k__cospi_E =
59 0 : octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
60 : cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);
61 0 : const __m128i k__cospi_F =
62 0 : octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
63 : cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
64 0 : const __m128i k__cospi_G =
65 0 : octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
66 : -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64);
67 0 : const __m128i k__cospi_H =
68 0 : octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
69 : -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64);
70 :
71 0 : const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
72 : // This second rounding constant saves doing some extra adds at the end
73 0 : const __m128i k__DCT_CONST_ROUNDING2 =
74 : _mm_set1_epi32(DCT_CONST_ROUNDING + (DCT_CONST_ROUNDING << 1));
75 0 : const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2;
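  // A sketch of why the combined constant is bit-exact, assuming
  // DCT_CONST_BITS == 14 so DCT_CONST_ROUNDING == 8192: the C code rounds
  // twice, first ((x + 8192) >> 14) and then (v + 1) >> 2 on that result.
  // For arithmetic shifts (((x + 8192) >> 14) + 1) >> 2 ==
  // (x + 8192 + (1 << 14)) >> 16, i.e. add 3 * DCT_CONST_ROUNDING and shift
  // by DCT_CONST_BITS + 2 in a single step.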
76 0 : const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
77 0 : const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
78 : __m128i in0, in1;
79 : #if DCT_HIGH_BIT_DEPTH
80 : __m128i cmp0, cmp1;
81 : int test, overflow;
82 : #endif
83 :
84 : // Load inputs.
85 0 : in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
86 0 : in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
87 0 : in1 = _mm_unpacklo_epi64(
88 0 : in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride)));
89 0 : in0 = _mm_unpacklo_epi64(
90 0 : in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride)));
91 : // in0 = [i0 i1 i2 i3 iC iD iE iF]
92 : // in1 = [i4 i5 i6 i7 i8 i9 iA iB]
93 : #if DCT_HIGH_BIT_DEPTH
94 : // Check inputs are within [-1024, 1023] so the optimised code can be used
95 : cmp0 = _mm_xor_si128(_mm_cmpgt_epi16(in0, _mm_set1_epi16(0x3ff)),
96 : _mm_cmplt_epi16(in0, _mm_set1_epi16(0xfc00)));
97 : cmp1 = _mm_xor_si128(_mm_cmpgt_epi16(in1, _mm_set1_epi16(0x3ff)),
98 : _mm_cmplt_epi16(in1, _mm_set1_epi16(0xfc00)));
99 : test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1));
100 : if (test) {
101 : vpx_highbd_fdct4x4_c(input, output, stride);
102 : return;
103 : }
104 : #endif // DCT_HIGH_BIT_DEPTH
105 :
106 : // multiply by 16 to give some extra precision
107 0 : in0 = _mm_slli_epi16(in0, 4);
108 0 : in1 = _mm_slli_epi16(in1, 4);
109 : // if (i == 0 && input[0]) input[0] += 1;
110 : // add 1 to the upper left pixel if it is non-zero, which helps reduce
111 : // the round-trip error
112 : {
113 : // The mask will only contain whether the first value is zero, all
114 : // other comparisons will fail as something shifted by 4 (above << 4)
115 : // can never be equal to one. To increment in the non-zero case, we
116 : // add the mask and one for the first element:
117 : // - if zero, mask = -1, v = v - 1 + 1 = v
118 : // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1
119 0 : __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a);
120 0 : in0 = _mm_add_epi16(in0, mask);
121 0 : in0 = _mm_add_epi16(in0, k__nonzero_bias_b);
122 : }
123 : // There are 4 total stages, alternating between an add/subtract stage
124 : // and a multiply-and-add stage.
125 : {
126 : // Stage 1: Add/subtract
127 :
128 : // in0 = [i0 i1 i2 i3 iC iD iE iF]
129 : // in1 = [i4 i5 i6 i7 i8 i9 iA iB]
130 0 : const __m128i r0 = _mm_unpacklo_epi16(in0, in1);
131 0 : const __m128i r1 = _mm_unpackhi_epi16(in0, in1);
132 : // r0 = [i0 i4 i1 i5 i2 i6 i3 i7]
133 : // r1 = [iC i8 iD i9 iE iA iF iB]
134 0 : const __m128i r2 = _mm_shuffle_epi32(r0, 0xB4);
135 0 : const __m128i r3 = _mm_shuffle_epi32(r1, 0xB4);
136 : // r2 = [i0 i4 i1 i5 i3 i7 i2 i6]
137 : // r3 = [iC i8 iD i9 iF iB iE iA]
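    // (0xB4 == _MM_SHUFFLE(2, 3, 1, 0): the low two 32-bit lanes are kept in
    // place and the high two are swapped, which yields the orderings above.)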
138 :
139 0 : const __m128i t0 = _mm_add_epi16(r2, r3);
140 0 : const __m128i t1 = _mm_sub_epi16(r2, r3);
141 : // t0 = [a0 a4 a1 a5 a3 a7 a2 a6]
142 : // t1 = [aC a8 aD a9 aF aB aE aA]
143 :
144 : // Stage 2: multiply by constants (which gets us into 32 bits).
145 : // The constants needed here are:
146 : // k__cospi_A = [p16 p16 p16 p16 p16 m16 p16 m16]
147 : // k__cospi_B = [p16 m16 p16 m16 p16 p16 p16 p16]
148 : // k__cospi_C = [p08 p24 p08 p24 p24 m08 p24 m08]
149 : // k__cospi_D = [p24 m08 p24 m08 p08 p24 p08 p24]
150 0 : const __m128i u0 = _mm_madd_epi16(t0, k__cospi_A);
151 0 : const __m128i u2 = _mm_madd_epi16(t0, k__cospi_B);
152 0 : const __m128i u1 = _mm_madd_epi16(t1, k__cospi_C);
153 0 : const __m128i u3 = _mm_madd_epi16(t1, k__cospi_D);
154 : // Then add and right-shift to get back to 16-bit range
155 0 : const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
156 0 : const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
157 0 : const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
158 0 : const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
159 0 : const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
160 0 : const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
161 0 : const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
162 0 : const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
163 : // w0 = [b0 b1 b7 b6]
164 : // w1 = [b8 b9 bF bE]
165 : // w2 = [b4 b5 b3 b2]
166 : // w3 = [bC bD bB bA]
167 0 : const __m128i x0 = _mm_packs_epi32(w0, w1);
168 0 : const __m128i x1 = _mm_packs_epi32(w2, w3);
169 : #if DCT_HIGH_BIT_DEPTH
170 : overflow = check_epi16_overflow_x2(&x0, &x1);
171 : if (overflow) {
172 : vpx_highbd_fdct4x4_c(input, output, stride);
173 : return;
174 : }
175 : #endif // DCT_HIGH_BIT_DEPTH
176 : // x0 = [b0 b1 b7 b6 b8 b9 bF bE]
177 : // x1 = [b4 b5 b3 b2 bC bD bB bA]
178 0 : in0 = _mm_shuffle_epi32(x0, 0xD8);
179 0 : in1 = _mm_shuffle_epi32(x1, 0x8D);
180 : // in0 = [b0 b1 b8 b9 b7 b6 bF bE]
181 : // in1 = [b3 b2 bB bA b4 b5 bC bD]
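    // (0xD8 selects 32-bit lanes {0, 2, 1, 3} and 0x8D selects {1, 3, 0, 2},
    // producing the orderings shown above.)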
182 : }
183 : {
184 : // vertical DCTs finished. Now we do the horizontal DCTs.
185 : // Stage 3: Add/subtract
186 :
187 0 : const __m128i t0 = ADD_EPI16(in0, in1);
188 0 : const __m128i t1 = SUB_EPI16(in0, in1);
189 : // t0 = [c0 c1 c8 c9 c4 c5 cC cD]
190 : // t1 = [c3 c2 cB cA -c7 -c6 -cF -cE]
191 : #if DCT_HIGH_BIT_DEPTH
192 : overflow = check_epi16_overflow_x2(&t0, &t1);
193 : if (overflow) {
194 : vpx_highbd_fdct4x4_c(input, output, stride);
195 : return;
196 : }
197 : #endif // DCT_HIGH_BIT_DEPTH
198 :
199 : // Stage 4: multiply by constants (which gets us into 32 bits).
200 : {
201 : // The constants needed here are:
202 : // k__cospi_E = [p16 p16 p16 p16 p16 p16 p16 p16]
203 : // k__cospi_F = [p16 m16 p16 m16 p16 m16 p16 m16]
204 : // k__cospi_G = [p08 p24 p08 p24 m08 m24 m08 m24]
205 : // k__cospi_H = [p24 m08 p24 m08 m24 p08 m24 p08]
206 0 : const __m128i u0 = _mm_madd_epi16(t0, k__cospi_E);
207 0 : const __m128i u1 = _mm_madd_epi16(t0, k__cospi_F);
208 0 : const __m128i u2 = _mm_madd_epi16(t1, k__cospi_G);
209 0 : const __m128i u3 = _mm_madd_epi16(t1, k__cospi_H);
210 : // Then add and right-shift to get back to 16-bit range
211 : // but this combines the final right-shift as well to save operations
212 : // This unusual rounding operation maintains bit-accurate
213 : // compatibility with the C version of this function, which has two
214 : // rounding steps in a row.
215 0 : const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING2);
216 0 : const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING2);
217 0 : const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING2);
218 0 : const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING2);
219 0 : const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS2);
220 0 : const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS2);
221 0 : const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS2);
222 0 : const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS2);
223 : // w0 = [o0 o4 o8 oC]
224 : // w1 = [o2 o6 oA oE]
225 : // w2 = [o1 o5 o9 oD]
226 : // w3 = [o3 o7 oB oF]
227 : // remember the o's are numbered according to the correct output location
228 0 : const __m128i x0 = _mm_packs_epi32(w0, w1);
229 0 : const __m128i x1 = _mm_packs_epi32(w2, w3);
230 : #if DCT_HIGH_BIT_DEPTH
231 : overflow = check_epi16_overflow_x2(&x0, &x1);
232 : if (overflow) {
233 : vpx_highbd_fdct4x4_c(input, output, stride);
234 : return;
235 : }
236 : #endif // DCT_HIGH_BIT_DEPTH
237 : {
238 : // x0 = [o0 o4 o8 oC o2 o6 oA oE]
239 : // x1 = [o1 o5 o9 oD o3 o7 oB oF]
240 0 : const __m128i y0 = _mm_unpacklo_epi16(x0, x1);
241 0 : const __m128i y1 = _mm_unpackhi_epi16(x0, x1);
242 : // y0 = [o0 o1 o4 o5 o8 o9 oC oD]
243 : // y1 = [o2 o3 o6 o7 oA oB oE oF]
244 0 : in0 = _mm_unpacklo_epi32(y0, y1);
245 : // in0 = [o0 o1 o2 o3 o4 o5 o6 o7]
246 0 : in1 = _mm_unpackhi_epi32(y0, y1);
247 : // in1 = [o8 o9 oA oB oC oD oE oF]
248 : }
249 : }
250 : }
251 : // The post-condition (v + 1) >> 2 is now incorporated into the previous
252 : // add and right-shift commands. Only 2 store instructions are needed
253 : // because rows 1 and 3 are stored immediately after rows 0 and 2.
254 0 : storeu_output(&in0, output + 0 * 4);
255 0 : storeu_output(&in1, output + 2 * 4);
256 0 : }
257 :
258 0 : void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) {
259 : int pass;
260 : // Constants
261 : // In one case all eight coefficients are the same; in all others a pair
262 : // of coefficients is repeated four times. This is done by constructing
263 : // the 32-bit constant corresponding to that pair.
264 0 : const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
265 0 : const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
266 0 : const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
267 0 : const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
268 0 : const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
269 0 : const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
270 0 : const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
271 0 : const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
272 0 : const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
273 : #if DCT_HIGH_BIT_DEPTH
274 : int overflow;
275 : #endif
276 : // Load input
277 0 : __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
278 0 : __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
279 0 : __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
280 0 : __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
281 0 : __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
282 0 : __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
283 0 : __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
284 0 : __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
285 : // Pre-condition input (shift by two)
286 0 : in0 = _mm_slli_epi16(in0, 2);
287 0 : in1 = _mm_slli_epi16(in1, 2);
288 0 : in2 = _mm_slli_epi16(in2, 2);
289 0 : in3 = _mm_slli_epi16(in3, 2);
290 0 : in4 = _mm_slli_epi16(in4, 2);
291 0 : in5 = _mm_slli_epi16(in5, 2);
292 0 : in6 = _mm_slli_epi16(in6, 2);
293 0 : in7 = _mm_slli_epi16(in7, 2);
294 :
295 : // We do two passes, first the columns, then the rows. The results of the
296 : // first pass are transposed so that the same column code can be reused. The
297 : // results of the second pass are also transposed so that the rows (processed
298 : // as columns) are put back in row positions.
299 0 : for (pass = 0; pass < 2; pass++) {
300 : // To store results of each pass before the transpose.
301 : __m128i res0, res1, res2, res3, res4, res5, res6, res7;
302 : // Add/subtract
303 0 : const __m128i q0 = ADD_EPI16(in0, in7);
304 0 : const __m128i q1 = ADD_EPI16(in1, in6);
305 0 : const __m128i q2 = ADD_EPI16(in2, in5);
306 0 : const __m128i q3 = ADD_EPI16(in3, in4);
307 0 : const __m128i q4 = SUB_EPI16(in3, in4);
308 0 : const __m128i q5 = SUB_EPI16(in2, in5);
309 0 : const __m128i q6 = SUB_EPI16(in1, in6);
310 0 : const __m128i q7 = SUB_EPI16(in0, in7);
311 : #if DCT_HIGH_BIT_DEPTH
312 : if (pass == 1) {
313 : overflow =
314 : check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
315 : if (overflow) {
316 : vpx_highbd_fdct8x8_c(input, output, stride);
317 : return;
318 : }
319 : }
320 : #endif // DCT_HIGH_BIT_DEPTH
321 : // Work on first four results
322 : {
323 : // Add/subtract
324 0 : const __m128i r0 = ADD_EPI16(q0, q3);
325 0 : const __m128i r1 = ADD_EPI16(q1, q2);
326 0 : const __m128i r2 = SUB_EPI16(q1, q2);
327 0 : const __m128i r3 = SUB_EPI16(q0, q3);
328 : #if DCT_HIGH_BIT_DEPTH
329 : overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
330 : if (overflow) {
331 : vpx_highbd_fdct8x8_c(input, output, stride);
332 : return;
333 : }
334 : #endif // DCT_HIGH_BIT_DEPTH
335 : // Interleave to do the multiply by constants which gets us into 32 bits
336 : {
337 0 : const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
338 0 : const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
339 0 : const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
340 0 : const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
341 0 : const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
342 0 : const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
343 0 : const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
344 0 : const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
345 0 : const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
346 0 : const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
347 0 : const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
348 0 : const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
349 : // dct_const_round_shift
350 0 : const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
351 0 : const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
352 0 : const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
353 0 : const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
354 0 : const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
355 0 : const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
356 0 : const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
357 0 : const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
358 0 : const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
359 0 : const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
360 0 : const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
361 0 : const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
362 0 : const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
363 0 : const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
364 0 : const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
365 0 : const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
366 : // Combine
367 0 : res0 = _mm_packs_epi32(w0, w1);
368 0 : res4 = _mm_packs_epi32(w2, w3);
369 0 : res2 = _mm_packs_epi32(w4, w5);
370 0 : res6 = _mm_packs_epi32(w6, w7);
371 : #if DCT_HIGH_BIT_DEPTH
372 : overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6);
373 : if (overflow) {
374 : vpx_highbd_fdct8x8_c(input, output, stride);
375 : return;
376 : }
377 : #endif // DCT_HIGH_BIT_DEPTH
378 : }
379 : }
380 : // Work on next four results
381 : {
382 : // Interleave to do the multiply by constants which gets us into 32 bits
383 0 : const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
384 0 : const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
385 0 : const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
386 0 : const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
387 0 : const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
388 0 : const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
389 : // dct_const_round_shift
390 0 : const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
391 0 : const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
392 0 : const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
393 0 : const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
394 0 : const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
395 0 : const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
396 0 : const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
397 0 : const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
398 : // Combine
399 0 : const __m128i r0 = _mm_packs_epi32(s0, s1);
400 0 : const __m128i r1 = _mm_packs_epi32(s2, s3);
401 : #if DCT_HIGH_BIT_DEPTH
402 : overflow = check_epi16_overflow_x2(&r0, &r1);
403 : if (overflow) {
404 : vpx_highbd_fdct8x8_c(input, output, stride);
405 : return;
406 : }
407 : #endif // DCT_HIGH_BIT_DEPTH
408 : {
409 : // Add/subtract
410 0 : const __m128i x0 = ADD_EPI16(q4, r0);
411 0 : const __m128i x1 = SUB_EPI16(q4, r0);
412 0 : const __m128i x2 = SUB_EPI16(q7, r1);
413 0 : const __m128i x3 = ADD_EPI16(q7, r1);
414 : #if DCT_HIGH_BIT_DEPTH
415 : overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
416 : if (overflow) {
417 : vpx_highbd_fdct8x8_c(input, output, stride);
418 : return;
419 : }
420 : #endif // DCT_HIGH_BIT_DEPTH
421 : // Interleave to do the multiply by constants which gets us into 32 bits
422 : {
423 0 : const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
424 0 : const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
425 0 : const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
426 0 : const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
427 0 : const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
428 0 : const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
429 0 : const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
430 0 : const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
431 0 : const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
432 0 : const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
433 0 : const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
434 0 : const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
435 : // dct_const_round_shift
436 0 : const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
437 0 : const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
438 0 : const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
439 0 : const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
440 0 : const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
441 0 : const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
442 0 : const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
443 0 : const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
444 0 : const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
445 0 : const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
446 0 : const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
447 0 : const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
448 0 : const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
449 0 : const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
450 0 : const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
451 0 : const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
452 : // Combine
453 0 : res1 = _mm_packs_epi32(w0, w1);
454 0 : res7 = _mm_packs_epi32(w2, w3);
455 0 : res5 = _mm_packs_epi32(w4, w5);
456 0 : res3 = _mm_packs_epi32(w6, w7);
457 : #if DCT_HIGH_BIT_DEPTH
458 : overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3);
459 : if (overflow) {
460 : vpx_highbd_fdct8x8_c(input, output, stride);
461 : return;
462 : }
463 : #endif // DCT_HIGH_BIT_DEPTH
464 : }
465 : }
466 : }
467 : // Transpose the 8x8.
468 : {
469 : // 00 01 02 03 04 05 06 07
470 : // 10 11 12 13 14 15 16 17
471 : // 20 21 22 23 24 25 26 27
472 : // 30 31 32 33 34 35 36 37
473 : // 40 41 42 43 44 45 46 47
474 : // 50 51 52 53 54 55 56 57
475 : // 60 61 62 63 64 65 66 67
476 : // 70 71 72 73 74 75 76 77
477 0 : const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
478 0 : const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3);
479 0 : const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1);
480 0 : const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3);
481 0 : const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5);
482 0 : const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);
483 0 : const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5);
484 0 : const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);
485 : // 00 10 01 11 02 12 03 13
486 : // 20 30 21 31 22 32 23 33
487 : // 04 14 05 15 06 16 07 17
488 : // 24 34 25 35 26 36 27 37
489 : // 40 50 41 51 42 52 43 53
490 : // 60 70 61 71 62 72 63 73
491 : // 44 54 45 55 46 56 47 57
492 : // 64 74 65 75 66 76 67 77
493 0 : const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
494 0 : const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
495 0 : const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
496 0 : const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
497 0 : const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
498 0 : const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
499 0 : const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
500 0 : const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
501 : // 00 10 20 30 01 11 21 31
502 : // 40 50 60 70 41 51 61 71
503 : // 02 12 22 32 03 13 23 33
504 : // 42 52 62 72 43 53 63 73
505 : // 04 14 24 34 05 15 25 35
506 : // 44 54 64 74 45 55 65 75
507 : // 06 16 26 36 07 17 27 37
508 : // 46 56 66 76 47 57 67 77
509 0 : in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
510 0 : in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
511 0 : in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
512 0 : in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
513 0 : in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
514 0 : in5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
515 0 : in6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
516 0 : in7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
517 : // 00 10 20 30 40 50 60 70
518 : // 01 11 21 31 41 51 61 71
519 : // 02 12 22 32 42 52 62 72
520 : // 03 13 23 33 43 53 63 73
521 : // 04 14 24 34 44 54 64 74
522 : // 05 15 25 35 45 55 65 75
523 : // 06 16 26 36 46 56 66 76
524 : // 07 17 27 37 47 57 67 77
525 : }
526 : }
527 : // Post-condition output and store it
528 : {
529 : // Post-condition (division by two)
530 : // division of two 16-bit signed numbers using shifts
531 : // n / 2 = (n - (n >> 15)) >> 1
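    // (Worked example: for n == -5, n >> 15 == -1, so (-5 - (-1)) >> 1 == -2,
    // matching C's truncating -5 / 2, whereas a plain -5 >> 1 would give -3.)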
532 0 : const __m128i sign_in0 = _mm_srai_epi16(in0, 15);
533 0 : const __m128i sign_in1 = _mm_srai_epi16(in1, 15);
534 0 : const __m128i sign_in2 = _mm_srai_epi16(in2, 15);
535 0 : const __m128i sign_in3 = _mm_srai_epi16(in3, 15);
536 0 : const __m128i sign_in4 = _mm_srai_epi16(in4, 15);
537 0 : const __m128i sign_in5 = _mm_srai_epi16(in5, 15);
538 0 : const __m128i sign_in6 = _mm_srai_epi16(in6, 15);
539 0 : const __m128i sign_in7 = _mm_srai_epi16(in7, 15);
540 0 : in0 = _mm_sub_epi16(in0, sign_in0);
541 0 : in1 = _mm_sub_epi16(in1, sign_in1);
542 0 : in2 = _mm_sub_epi16(in2, sign_in2);
543 0 : in3 = _mm_sub_epi16(in3, sign_in3);
544 0 : in4 = _mm_sub_epi16(in4, sign_in4);
545 0 : in5 = _mm_sub_epi16(in5, sign_in5);
546 0 : in6 = _mm_sub_epi16(in6, sign_in6);
547 0 : in7 = _mm_sub_epi16(in7, sign_in7);
548 0 : in0 = _mm_srai_epi16(in0, 1);
549 0 : in1 = _mm_srai_epi16(in1, 1);
550 0 : in2 = _mm_srai_epi16(in2, 1);
551 0 : in3 = _mm_srai_epi16(in3, 1);
552 0 : in4 = _mm_srai_epi16(in4, 1);
553 0 : in5 = _mm_srai_epi16(in5, 1);
554 0 : in6 = _mm_srai_epi16(in6, 1);
555 0 : in7 = _mm_srai_epi16(in7, 1);
556 : // store results
557 0 : store_output(&in0, (output + 0 * 8));
558 0 : store_output(&in1, (output + 1 * 8));
559 0 : store_output(&in2, (output + 2 * 8));
560 0 : store_output(&in3, (output + 3 * 8));
561 0 : store_output(&in4, (output + 4 * 8));
562 0 : store_output(&in5, (output + 5 * 8));
563 0 : store_output(&in6, (output + 6 * 8));
564 0 : store_output(&in7, (output + 7 * 8));
565 : }
566 0 : }
567 :
568 0 : void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) {
569 : // The 2D transform is done with two passes which are actually pretty
570 : // similar. In the first one, we transform the columns and transpose
571 : // the results. In the second one, we transform the rows. To achieve that,
572 : // as the first pass results are transposed, we transform the columns (that
573 : // is, the transposed rows) and transpose the results (so that they go back
574 : // in normal/row positions).
575 : int pass;
576 : // We need an intermediate buffer between passes.
577 : DECLARE_ALIGNED(16, int16_t, intermediate[256]);
578 0 : const int16_t *in = input;
579 0 : int16_t *out0 = intermediate;
580 0 : tran_low_t *out1 = output;
581 : // Constants
582 : // In one case all eight coefficients are the same; in all others a pair
583 : // of coefficients is repeated four times. This is done by constructing
584 : // the 32-bit constant corresponding to that pair.
585 0 : const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
586 0 : const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
587 0 : const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
588 0 : const __m128i k__cospi_p08_m24 = pair_set_epi16(cospi_8_64, -cospi_24_64);
589 0 : const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
590 0 : const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
591 0 : const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
592 0 : const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
593 0 : const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
594 0 : const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
595 0 : const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
596 0 : const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
597 0 : const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
598 0 : const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
599 0 : const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
600 0 : const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
601 0 : const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
602 0 : const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
603 0 : const __m128i kOne = _mm_set1_epi16(1);
604 : // Do the two transform/transpose passes
605 0 : for (pass = 0; pass < 2; ++pass) {
606 : // We process eight columns (transposed rows in second pass) at a time.
607 : int column_start;
608 : #if DCT_HIGH_BIT_DEPTH
609 : int overflow;
610 : #endif
611 0 : for (column_start = 0; column_start < 16; column_start += 8) {
612 : __m128i in00, in01, in02, in03, in04, in05, in06, in07;
613 : __m128i in08, in09, in10, in11, in12, in13, in14, in15;
614 : __m128i input0, input1, input2, input3, input4, input5, input6, input7;
615 : __m128i step1_0, step1_1, step1_2, step1_3;
616 : __m128i step1_4, step1_5, step1_6, step1_7;
617 : __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6;
618 : __m128i step3_0, step3_1, step3_2, step3_3;
619 : __m128i step3_4, step3_5, step3_6, step3_7;
620 : __m128i res00, res01, res02, res03, res04, res05, res06, res07;
621 : __m128i res08, res09, res10, res11, res12, res13, res14, res15;
622 : // Load and pre-condition input.
623 0 : if (0 == pass) {
624 0 : in00 = _mm_load_si128((const __m128i *)(in + 0 * stride));
625 0 : in01 = _mm_load_si128((const __m128i *)(in + 1 * stride));
626 0 : in02 = _mm_load_si128((const __m128i *)(in + 2 * stride));
627 0 : in03 = _mm_load_si128((const __m128i *)(in + 3 * stride));
628 0 : in04 = _mm_load_si128((const __m128i *)(in + 4 * stride));
629 0 : in05 = _mm_load_si128((const __m128i *)(in + 5 * stride));
630 0 : in06 = _mm_load_si128((const __m128i *)(in + 6 * stride));
631 0 : in07 = _mm_load_si128((const __m128i *)(in + 7 * stride));
632 0 : in08 = _mm_load_si128((const __m128i *)(in + 8 * stride));
633 0 : in09 = _mm_load_si128((const __m128i *)(in + 9 * stride));
634 0 : in10 = _mm_load_si128((const __m128i *)(in + 10 * stride));
635 0 : in11 = _mm_load_si128((const __m128i *)(in + 11 * stride));
636 0 : in12 = _mm_load_si128((const __m128i *)(in + 12 * stride));
637 0 : in13 = _mm_load_si128((const __m128i *)(in + 13 * stride));
638 0 : in14 = _mm_load_si128((const __m128i *)(in + 14 * stride));
639 0 : in15 = _mm_load_si128((const __m128i *)(in + 15 * stride));
640 : // x = x << 2
641 0 : in00 = _mm_slli_epi16(in00, 2);
642 0 : in01 = _mm_slli_epi16(in01, 2);
643 0 : in02 = _mm_slli_epi16(in02, 2);
644 0 : in03 = _mm_slli_epi16(in03, 2);
645 0 : in04 = _mm_slli_epi16(in04, 2);
646 0 : in05 = _mm_slli_epi16(in05, 2);
647 0 : in06 = _mm_slli_epi16(in06, 2);
648 0 : in07 = _mm_slli_epi16(in07, 2);
649 0 : in08 = _mm_slli_epi16(in08, 2);
650 0 : in09 = _mm_slli_epi16(in09, 2);
651 0 : in10 = _mm_slli_epi16(in10, 2);
652 0 : in11 = _mm_slli_epi16(in11, 2);
653 0 : in12 = _mm_slli_epi16(in12, 2);
654 0 : in13 = _mm_slli_epi16(in13, 2);
655 0 : in14 = _mm_slli_epi16(in14, 2);
656 0 : in15 = _mm_slli_epi16(in15, 2);
657 : } else {
658 0 : in00 = _mm_load_si128((const __m128i *)(in + 0 * 16));
659 0 : in01 = _mm_load_si128((const __m128i *)(in + 1 * 16));
660 0 : in02 = _mm_load_si128((const __m128i *)(in + 2 * 16));
661 0 : in03 = _mm_load_si128((const __m128i *)(in + 3 * 16));
662 0 : in04 = _mm_load_si128((const __m128i *)(in + 4 * 16));
663 0 : in05 = _mm_load_si128((const __m128i *)(in + 5 * 16));
664 0 : in06 = _mm_load_si128((const __m128i *)(in + 6 * 16));
665 0 : in07 = _mm_load_si128((const __m128i *)(in + 7 * 16));
666 0 : in08 = _mm_load_si128((const __m128i *)(in + 8 * 16));
667 0 : in09 = _mm_load_si128((const __m128i *)(in + 9 * 16));
668 0 : in10 = _mm_load_si128((const __m128i *)(in + 10 * 16));
669 0 : in11 = _mm_load_si128((const __m128i *)(in + 11 * 16));
670 0 : in12 = _mm_load_si128((const __m128i *)(in + 12 * 16));
671 0 : in13 = _mm_load_si128((const __m128i *)(in + 13 * 16));
672 0 : in14 = _mm_load_si128((const __m128i *)(in + 14 * 16));
673 0 : in15 = _mm_load_si128((const __m128i *)(in + 15 * 16));
674 : // x = (x + 1) >> 2
675 0 : in00 = _mm_add_epi16(in00, kOne);
676 0 : in01 = _mm_add_epi16(in01, kOne);
677 0 : in02 = _mm_add_epi16(in02, kOne);
678 0 : in03 = _mm_add_epi16(in03, kOne);
679 0 : in04 = _mm_add_epi16(in04, kOne);
680 0 : in05 = _mm_add_epi16(in05, kOne);
681 0 : in06 = _mm_add_epi16(in06, kOne);
682 0 : in07 = _mm_add_epi16(in07, kOne);
683 0 : in08 = _mm_add_epi16(in08, kOne);
684 0 : in09 = _mm_add_epi16(in09, kOne);
685 0 : in10 = _mm_add_epi16(in10, kOne);
686 0 : in11 = _mm_add_epi16(in11, kOne);
687 0 : in12 = _mm_add_epi16(in12, kOne);
688 0 : in13 = _mm_add_epi16(in13, kOne);
689 0 : in14 = _mm_add_epi16(in14, kOne);
690 0 : in15 = _mm_add_epi16(in15, kOne);
691 0 : in00 = _mm_srai_epi16(in00, 2);
692 0 : in01 = _mm_srai_epi16(in01, 2);
693 0 : in02 = _mm_srai_epi16(in02, 2);
694 0 : in03 = _mm_srai_epi16(in03, 2);
695 0 : in04 = _mm_srai_epi16(in04, 2);
696 0 : in05 = _mm_srai_epi16(in05, 2);
697 0 : in06 = _mm_srai_epi16(in06, 2);
698 0 : in07 = _mm_srai_epi16(in07, 2);
699 0 : in08 = _mm_srai_epi16(in08, 2);
700 0 : in09 = _mm_srai_epi16(in09, 2);
701 0 : in10 = _mm_srai_epi16(in10, 2);
702 0 : in11 = _mm_srai_epi16(in11, 2);
703 0 : in12 = _mm_srai_epi16(in12, 2);
704 0 : in13 = _mm_srai_epi16(in13, 2);
705 0 : in14 = _mm_srai_epi16(in14, 2);
706 0 : in15 = _mm_srai_epi16(in15, 2);
707 : }
708 0 : in += 8;
709 : // Calculate input for the first 8 results.
710 : {
711 0 : input0 = ADD_EPI16(in00, in15);
712 0 : input1 = ADD_EPI16(in01, in14);
713 0 : input2 = ADD_EPI16(in02, in13);
714 0 : input3 = ADD_EPI16(in03, in12);
715 0 : input4 = ADD_EPI16(in04, in11);
716 0 : input5 = ADD_EPI16(in05, in10);
717 0 : input6 = ADD_EPI16(in06, in09);
718 0 : input7 = ADD_EPI16(in07, in08);
719 : #if DCT_HIGH_BIT_DEPTH
720 : overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3,
721 : &input4, &input5, &input6, &input7);
722 : if (overflow) {
723 : vpx_highbd_fdct16x16_c(input, output, stride);
724 : return;
725 : }
726 : #endif // DCT_HIGH_BIT_DEPTH
727 : }
728 : // Calculate input for the next 8 results.
729 : {
730 0 : step1_0 = SUB_EPI16(in07, in08);
731 0 : step1_1 = SUB_EPI16(in06, in09);
732 0 : step1_2 = SUB_EPI16(in05, in10);
733 0 : step1_3 = SUB_EPI16(in04, in11);
734 0 : step1_4 = SUB_EPI16(in03, in12);
735 0 : step1_5 = SUB_EPI16(in02, in13);
736 0 : step1_6 = SUB_EPI16(in01, in14);
737 0 : step1_7 = SUB_EPI16(in00, in15);
738 : #if DCT_HIGH_BIT_DEPTH
739 : overflow =
740 : check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
741 : &step1_4, &step1_5, &step1_6, &step1_7);
742 : if (overflow) {
743 : vpx_highbd_fdct16x16_c(input, output, stride);
744 : return;
745 : }
746 : #endif // DCT_HIGH_BIT_DEPTH
747 : }
748 : // Work on the first eight values; fdct8(input, even_results);
749 : {
750 : // Add/subtract
751 0 : const __m128i q0 = ADD_EPI16(input0, input7);
752 0 : const __m128i q1 = ADD_EPI16(input1, input6);
753 0 : const __m128i q2 = ADD_EPI16(input2, input5);
754 0 : const __m128i q3 = ADD_EPI16(input3, input4);
755 0 : const __m128i q4 = SUB_EPI16(input3, input4);
756 0 : const __m128i q5 = SUB_EPI16(input2, input5);
757 0 : const __m128i q6 = SUB_EPI16(input1, input6);
758 0 : const __m128i q7 = SUB_EPI16(input0, input7);
759 : #if DCT_HIGH_BIT_DEPTH
760 : overflow =
761 : check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7);
762 : if (overflow) {
763 : vpx_highbd_fdct16x16_c(input, output, stride);
764 : return;
765 : }
766 : #endif // DCT_HIGH_BIT_DEPTH
767 : // Work on first four results
768 : {
769 : // Add/subtract
770 0 : const __m128i r0 = ADD_EPI16(q0, q3);
771 0 : const __m128i r1 = ADD_EPI16(q1, q2);
772 0 : const __m128i r2 = SUB_EPI16(q1, q2);
773 0 : const __m128i r3 = SUB_EPI16(q0, q3);
774 : #if DCT_HIGH_BIT_DEPTH
775 : overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3);
776 : if (overflow) {
777 : vpx_highbd_fdct16x16_c(input, output, stride);
778 : return;
779 : }
780 : #endif // DCT_HIGH_BIT_DEPTH
781 : // Interleave to do the multiply by constants which gets us
782 : // into 32 bits.
783 : {
784 0 : const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
785 0 : const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
786 0 : const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
787 0 : const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
788 0 : res00 = mult_round_shift(&t0, &t1, &k__cospi_p16_p16,
789 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
790 0 : res08 = mult_round_shift(&t0, &t1, &k__cospi_p16_m16,
791 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
792 0 : res04 = mult_round_shift(&t2, &t3, &k__cospi_p24_p08,
793 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
794 0 : res12 = mult_round_shift(&t2, &t3, &k__cospi_m08_p24,
795 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
796 : #if DCT_HIGH_BIT_DEPTH
797 : overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12);
798 : if (overflow) {
799 : vpx_highbd_fdct16x16_c(input, output, stride);
800 : return;
801 : }
802 : #endif // DCT_HIGH_BIT_DEPTH
803 : }
804 : }
805 : // Work on next four results
806 : {
807 : // Interleave to do the multiply by constants which gets us
808 : // into 32 bits.
809 0 : const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
810 0 : const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
811 0 : const __m128i r0 =
812 : mult_round_shift(&d0, &d1, &k__cospi_p16_m16,
813 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
814 0 : const __m128i r1 =
815 : mult_round_shift(&d0, &d1, &k__cospi_p16_p16,
816 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
817 : #if DCT_HIGH_BIT_DEPTH
818 : overflow = check_epi16_overflow_x2(&r0, &r1);
819 : if (overflow) {
820 : vpx_highbd_fdct16x16_c(input, output, stride);
821 : return;
822 : }
823 : #endif // DCT_HIGH_BIT_DEPTH
824 : {
825 : // Add/subtract
826 0 : const __m128i x0 = ADD_EPI16(q4, r0);
827 0 : const __m128i x1 = SUB_EPI16(q4, r0);
828 0 : const __m128i x2 = SUB_EPI16(q7, r1);
829 0 : const __m128i x3 = ADD_EPI16(q7, r1);
830 : #if DCT_HIGH_BIT_DEPTH
831 : overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3);
832 : if (overflow) {
833 : vpx_highbd_fdct16x16_c(input, output, stride);
834 : return;
835 : }
836 : #endif // DCT_HIGH_BIT_DEPTH
837 : // Interleave to do the multiply by constants which gets us
838 : // into 32 bits.
839 : {
840 0 : const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
841 0 : const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
842 0 : const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
843 0 : const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
844 0 : res02 = mult_round_shift(&t0, &t1, &k__cospi_p28_p04,
845 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
846 0 : res14 = mult_round_shift(&t0, &t1, &k__cospi_m04_p28,
847 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
848 0 : res10 = mult_round_shift(&t2, &t3, &k__cospi_p12_p20,
849 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
850 0 : res06 = mult_round_shift(&t2, &t3, &k__cospi_m20_p12,
851 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
852 : #if DCT_HIGH_BIT_DEPTH
853 : overflow =
854 : check_epi16_overflow_x4(&res02, &res14, &res10, &res06);
855 : if (overflow) {
856 : vpx_highbd_fdct16x16_c(input, output, stride);
857 : return;
858 : }
859 : #endif // DCT_HIGH_BIT_DEPTH
860 : }
861 : }
862 : }
863 : }
864 : // Work on the next eight values; step1 -> odd_results
865 : {
866 : // step 2
867 : {
868 0 : const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2);
869 0 : const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2);
870 0 : const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3);
871 0 : const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3);
872 0 : step2_2 = mult_round_shift(&t0, &t1, &k__cospi_p16_m16,
873 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
874 0 : step2_3 = mult_round_shift(&t2, &t3, &k__cospi_p16_m16,
875 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
876 0 : step2_5 = mult_round_shift(&t0, &t1, &k__cospi_p16_p16,
877 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
878 0 : step2_4 = mult_round_shift(&t2, &t3, &k__cospi_p16_p16,
879 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
880 : #if DCT_HIGH_BIT_DEPTH
881 : overflow =
882 : check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4);
883 : if (overflow) {
884 : vpx_highbd_fdct16x16_c(input, output, stride);
885 : return;
886 : }
887 : #endif // DCT_HIGH_BIT_DEPTH
888 : }
889 : // step 3
890 : {
891 0 : step3_0 = ADD_EPI16(step1_0, step2_3);
892 0 : step3_1 = ADD_EPI16(step1_1, step2_2);
893 0 : step3_2 = SUB_EPI16(step1_1, step2_2);
894 0 : step3_3 = SUB_EPI16(step1_0, step2_3);
895 0 : step3_4 = SUB_EPI16(step1_7, step2_4);
896 0 : step3_5 = SUB_EPI16(step1_6, step2_5);
897 0 : step3_6 = ADD_EPI16(step1_6, step2_5);
898 0 : step3_7 = ADD_EPI16(step1_7, step2_4);
899 : #if DCT_HIGH_BIT_DEPTH
900 : overflow =
901 : check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3,
902 : &step3_4, &step3_5, &step3_6, &step3_7);
903 : if (overflow) {
904 : vpx_highbd_fdct16x16_c(input, output, stride);
905 : return;
906 : }
907 : #endif // DCT_HIGH_BIT_DEPTH
908 : }
909 : // step 4
910 : {
911 0 : const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6);
912 0 : const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6);
913 0 : const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5);
914 0 : const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5);
915 0 : step2_1 = mult_round_shift(&t0, &t1, &k__cospi_m08_p24,
916 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
917 0 : step2_2 = mult_round_shift(&t2, &t3, &k__cospi_p24_p08,
918 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
919 0 : step2_6 = mult_round_shift(&t0, &t1, &k__cospi_p24_p08,
920 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
921 0 : step2_5 = mult_round_shift(&t2, &t3, &k__cospi_p08_m24,
922 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
923 : #if DCT_HIGH_BIT_DEPTH
924 : overflow =
925 : check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5);
926 : if (overflow) {
927 : vpx_highbd_fdct16x16_c(input, output, stride);
928 : return;
929 : }
930 : #endif // DCT_HIGH_BIT_DEPTH
931 : }
932 : // step 5
933 : {
934 0 : step1_0 = ADD_EPI16(step3_0, step2_1);
935 0 : step1_1 = SUB_EPI16(step3_0, step2_1);
936 0 : step1_2 = ADD_EPI16(step3_3, step2_2);
937 0 : step1_3 = SUB_EPI16(step3_3, step2_2);
938 0 : step1_4 = SUB_EPI16(step3_4, step2_5);
939 0 : step1_5 = ADD_EPI16(step3_4, step2_5);
940 0 : step1_6 = SUB_EPI16(step3_7, step2_6);
941 0 : step1_7 = ADD_EPI16(step3_7, step2_6);
942 : #if DCT_HIGH_BIT_DEPTH
943 : overflow =
944 : check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3,
945 : &step1_4, &step1_5, &step1_6, &step1_7);
946 : if (overflow) {
947 : vpx_highbd_fdct16x16_c(input, output, stride);
948 : return;
949 : }
950 : #endif // DCT_HIGH_BIT_DEPTH
951 : }
952 : // step 6
953 : {
954 0 : const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7);
955 0 : const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7);
956 0 : const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6);
957 0 : const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6);
958 0 : res01 = mult_round_shift(&t0, &t1, &k__cospi_p30_p02,
959 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
960 0 : res09 = mult_round_shift(&t2, &t3, &k__cospi_p14_p18,
961 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
962 0 : res15 = mult_round_shift(&t0, &t1, &k__cospi_m02_p30,
963 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
964 0 : res07 = mult_round_shift(&t2, &t3, &k__cospi_m18_p14,
965 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
966 : #if DCT_HIGH_BIT_DEPTH
967 : overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07);
968 : if (overflow) {
969 : vpx_highbd_fdct16x16_c(input, output, stride);
970 : return;
971 : }
972 : #endif // DCT_HIGH_BIT_DEPTH
973 : }
974 : {
975 0 : const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5);
976 0 : const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5);
977 0 : const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4);
978 0 : const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4);
979 0 : res05 = mult_round_shift(&t0, &t1, &k__cospi_p22_p10,
980 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
981 0 : res13 = mult_round_shift(&t2, &t3, &k__cospi_p06_p26,
982 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
983 0 : res11 = mult_round_shift(&t0, &t1, &k__cospi_m10_p22,
984 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
985 0 : res03 = mult_round_shift(&t2, &t3, &k__cospi_m26_p06,
986 : &k__DCT_CONST_ROUNDING, DCT_CONST_BITS);
987 : #if DCT_HIGH_BIT_DEPTH
988 : overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03);
989 : if (overflow) {
990 : vpx_highbd_fdct16x16_c(input, output, stride);
991 : return;
992 : }
993 : #endif // DCT_HIGH_BIT_DEPTH
994 : }
995 : }
996 : // Transpose the results, do it as two 8x8 transposes.
997 0 : transpose_and_output8x8(&res00, &res01, &res02, &res03, &res04, &res05,
998 : &res06, &res07, pass, out0, out1);
999 0 : transpose_and_output8x8(&res08, &res09, &res10, &res11, &res12, &res13,
1000 : &res14, &res15, pass, out0 + 8, out1 + 8);
1001 0 : if (pass == 0) {
1002 0 : out0 += 8 * 16;
1003 : } else {
1004 0 : out1 += 8 * 16;
1005 : }
1006 : }
1007 : // Setup in/out for next pass.
1008 0 : in = intermediate;
1009 : }
1010 0 : }
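// Usage sketch, assuming the 16x16 template above is instantiated with
// DCT_HIGH_BIT_DEPTH == 0 under the name vpx_fdct16x16_sse2 (the name is
// illustrative): the input must be 16-byte aligned because the loads use
// _mm_load_si128, and the stride is counted in int16_t elements.
//   DECLARE_ALIGNED(16, int16_t, src[16 * 16]);
//   DECLARE_ALIGNED(16, tran_low_t, coeff[16 * 16]);
//   /* ... fill src with residual samples ... */
//   vpx_fdct16x16_sse2(src, coeff, 16);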
1011 :
1012 : #undef ADD_EPI16
1013 : #undef SUB_EPI16