/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkJumper_vectors_DEFINED
#define SkJumper_vectors_DEFINED

#include "SkJumper.h"
#include "SkJumper_misc.h"

// This file contains vector types that SkJumper_stages.cpp uses to define stages.

// Every function in this file should be marked static and inline using SI (see SkJumper_misc.h).

#if !defined(JUMPER)
    // This path should lead to portable code that can be compiled directly into Skia.
    // (All other paths are compiled offline by Clang into SkJumper_generated.S.)
    #include <math.h>

    using F   = float   ;
    using I32 =  int32_t;
    using U64 = uint64_t;
    using U32 = uint32_t;
    using U16 = uint16_t;
    using U8  = uint8_t ;

    SI F   mad(F f, F m, F a)   { return f*m+a; }
    SI F   min(F a, F b)        { return fminf(a,b); }
    SI F   max(F a, F b)        { return fmaxf(a,b); }
    SI F   abs_  (F v)          { return fabsf(v); }
    SI F   floor_(F v)          { return floorf(v); }
    SI F   rcp   (F v)          { return 1.0f / v; }
    SI F   rsqrt (F v)          { return 1.0f / sqrtf(v); }
    SI U32 round (F v, F scale) { return (uint32_t)lrintf(v*scale); }
    SI U16 pack(U32 v)          { return (U16)v; }
    SI U8  pack(U16 v)          { return (U8)v; }

    SI F if_then_else(I32 c, F t, F e) { return c ? t : e; }

    template <typename T>
    SI T gather(const T* p, U32 ix) { return p[ix]; }

    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        *r = ptr[0];
        *g = ptr[1];
        *b = ptr[2];
        *a = ptr[3];
    }
    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        ptr[0] = r;
        ptr[1] = g;
        ptr[2] = b;
        ptr[3] = a;
    }

    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        *r = ptr[0];
        *g = ptr[1];
        *b = ptr[2];
        *a = ptr[3];
    }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        ptr[0] = r;
        ptr[1] = g;
        ptr[2] = b;
        ptr[3] = a;
    }

    SI F from_half(U16 h) {
        if ((int16_t)h < 0x0400) { h = 0; }   // Flush denorm and negative to zero.
        return bit_cast<F>(h << 13)           // Line up the mantissa,
             * bit_cast<F>(U32(0x77800000));  // then fix up the exponent.
    }
    SI U16 to_half(F f) {
        return bit_cast<U32>(f * bit_cast<F>(U32(0x07800000_i)))  // Fix up the exponent,
            >> 13;                                                 // then line up the mantissa.
    }
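    // Illustrative worked example (not part of the build): the half 1.0f is 0x3c00, and
    // 0x3c00 << 13 == 0x07800000, which read as a float is 2^-112; bit_cast<F>(U32(0x77800000))
    // is 2^112, so the product is exactly 1.0f.  to_half() applies the same scaling in reverse
    // before shifting the bits back down by 13.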

#elif defined(__aarch64__)
    #include <arm_neon.h>

    // Since we know we're using Clang, we can use its vector extensions.
    template <typename T> using V = T __attribute__((ext_vector_type(4)));
    using F   = V<float   >;
    using I32 = V< int32_t>;
    using U64 = V<uint64_t>;
    using U32 = V<uint32_t>;
    using U16 = V<uint16_t>;
    using U8  = V<uint8_t >;

    // We polyfill a few routines that Clang doesn't build into ext_vector_types.
    SI F   mad(F f, F m, F a) { return vfmaq_f32(a,f,m); }
    SI F   min(F a, F b)      { return vminq_f32(a,b);   }
    SI F   max(F a, F b)      { return vmaxq_f32(a,b);   }
    SI F   abs_  (F v)        { return vabsq_f32(v);     }
    SI F   floor_(F v)        { return vrndmq_f32(v);    }
    SI F   rcp   (F v) { auto e = vrecpeq_f32 (v); return vrecpsq_f32 (v,e  ) * e; }
    SI F   rsqrt (F v) { auto e = vrsqrteq_f32(v); return vrsqrtsq_f32(v,e*e) * e; }
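    // Note: vrecpsq_f32(v,e) computes 2 - v*e and vrsqrtsq_f32(v,e*e) computes (3 - v*e*e)/2,
    // so each line above folds one Newton-Raphson refinement step into the hardware estimate.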
    SI U32 round (F v, F scale) { return vcvtnq_u32_f32(v*scale); }
    SI U16 pack(U32 v)          { return __builtin_convertvector(v, U16); }
    SI U8  pack(U16 v)          { return __builtin_convertvector(v,  U8); }

    SI F if_then_else(I32 c, F t, F e) { return vbslq_f32((U32)c,t,e); }

    template <typename T>
    SI V<T> gather(const T* p, U32 ix) {
        return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
    }

    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        uint16x4x4_t rgba = vld4_u16(ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
    }

    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        float32x4x4_t rgba = vld4q_f32(ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
    }

    SI F   from_half(U16 h) { return vcvt_f32_f16(h); }
    SI U16 to_half(F f)     { return vcvt_f16_f32(f); }

#elif defined(__arm__)
    #if defined(__thumb2__) || !defined(__ARM_ARCH_7A__) || !defined(__ARM_VFPV4__)
        #error On ARMv7, compile with -march=armv7-a -mfpu=neon-vfpv4, without -mthumb.
    #endif
    #include <arm_neon.h>

    // We can pass {s0-s15} as arguments under AAPCS-VFP. We'll slice that as 8 d-registers.
    template <typename T> using V = T __attribute__((ext_vector_type(2)));
    using F   = V<float   >;
    using I32 = V< int32_t>;
    using U64 = V<uint64_t>;
    using U32 = V<uint32_t>;
    using U16 = V<uint16_t>;
    using U8  = V<uint8_t >;

    SI F   mad(F f, F m, F a) { return vfma_f32(a,f,m); }
    SI F   min(F a, F b)      { return vmin_f32(a,b);   }
    SI F   max(F a, F b)      { return vmax_f32(a,b);   }
    SI F   abs_ (F v)         { return vabs_f32(v);     }
    SI F   rcp  (F v) { auto e = vrecpe_f32 (v); return vrecps_f32 (v,e  ) * e; }
    SI F   rsqrt(F v) { auto e = vrsqrte_f32(v); return vrsqrts_f32(v,e*e) * e; }
    SI U32 round(F v, F scale) { return vcvt_u32_f32(mad(v,scale,0.5f)); }
    SI U16 pack(U32 v)         { return __builtin_convertvector(v, U16); }
    SI U8  pack(U16 v)         { return __builtin_convertvector(v,  U8); }

    SI F if_then_else(I32 c, F t, F e) { return vbsl_f32((U32)c,t,e); }

    SI F floor_(F v) {
        F roundtrip = vcvt_f32_s32(vcvt_s32_f32(v));
        return roundtrip - if_then_else(roundtrip > v, 1.0_f, 0);
    }
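    // ARMv7 NEON has no round-toward-minus-infinity instruction, so floor_() truncates and then
    // subtracts 1 from any lane where truncation rounded up (the comparison yields all-ones lanes,
    // which if_then_else() uses to select 1.0 or 0).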

    template <typename T>
    SI V<T> gather(const T* p, U32 ix) {
        return {p[ix[0]], p[ix[1]]};
    }

    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        uint16x4x4_t rgba;
        rgba = vld4_lane_u16(ptr + 0, rgba, 0);
        rgba = vld4_lane_u16(ptr + 4, rgba, 1);
        *r = unaligned_load<U16>(rgba.val+0);
        *g = unaligned_load<U16>(rgba.val+1);
        *b = unaligned_load<U16>(rgba.val+2);
        *a = unaligned_load<U16>(rgba.val+3);
    }
    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        uint16x4x4_t rgba = {{
            widen_cast<uint16x4_t>(r),
            widen_cast<uint16x4_t>(g),
            widen_cast<uint16x4_t>(b),
            widen_cast<uint16x4_t>(a),
        }};
        vst4_lane_u16(ptr + 0, rgba, 0);
        vst4_lane_u16(ptr + 4, rgba, 1);
    }

    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        float32x2x4_t rgba = vld4_f32(ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        vst4_f32(ptr, (float32x2x4_t{{r,g,b,a}}));
    }

    SI F from_half(U16 h) {
        auto v = widen_cast<uint16x4_t>(h);
        return vget_low_f32(vcvt_f32_f16(v));
    }
    SI U16 to_half(F f) {
        auto v = widen_cast<float32x4_t>(f);
        uint16x4_t h = vcvt_f16_f32(v);
        return unaligned_load<U16>(&h);
    }
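    // widen_cast<>() and unaligned_load<>() (see SkJumper_misc.h) move our 2-lane values into and
    // out of the 4-lane NEON types; only the low two lanes carry meaningful data here.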

#elif defined(__AVX__)
    #include <immintrin.h>

    // These are __m256 and __m256i, but friendlier and strongly-typed.
    template <typename T> using V = T __attribute__((ext_vector_type(8)));
    using F   = V<float   >;
    using I32 = V< int32_t>;
    using U64 = V<uint64_t>;
    using U32 = V<uint32_t>;
    using U16 = V<uint16_t>;
    using U8  = V<uint8_t >;

    SI F mad(F f, F m, F a)  {
    #if defined(__FMA__)
        return _mm256_fmadd_ps(f,m,a);
    #else
        return f*m+a;
    #endif
    }

    SI F   min(F a, F b) { return _mm256_min_ps(a,b);    }
    SI F   max(F a, F b) { return _mm256_max_ps(a,b);    }
    SI F   abs_  (F v)   { return _mm256_and_ps(v, 0-v); }
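    // (For abs_(): 0-v differs from v only in its sign bit, so ANDing the two keeps the
    //  exponent and mantissa bits and clears the sign.)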
    SI F   floor_(F v)   { return _mm256_floor_ps(v);    }
    SI F   rcp   (F v)   { return _mm256_rcp_ps  (v);    }
    SI F   rsqrt (F v)   { return _mm256_rsqrt_ps(v);    }
    SI U32 round (F v, F scale) { return _mm256_cvtps_epi32(v*scale); }

    SI U16 pack(U32 v) {
        return _mm_packus_epi32(_mm256_extractf128_si256(v, 0),
                                _mm256_extractf128_si256(v, 1));
    }
    SI U8 pack(U16 v) {
        auto r = _mm_packus_epi16(v,v);
        return unaligned_load<U8>(&r);
    }

    SI F if_then_else(I32 c, F t, F e) { return _mm256_blendv_ps(e,t,c); }

    template <typename T>
    SI V<T> gather(const T* p, U32 ix) {
        return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
                 p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], };
    }
    #if defined(__AVX2__)
        SI F   gather(const float*    p, U32 ix) { return _mm256_i32gather_ps   (p, ix, 4); }
        SI U32 gather(const uint32_t* p, U32 ix) { return _mm256_i32gather_epi32(p, ix, 4); }
        SI U64 gather(const uint64_t* p, U32 ix) {
            __m256i parts[] = {
                _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,0), 8),
                _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,1), 8),
            };
            return bit_cast<U64>(parts);
        }
    #endif

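    // Each pixel below is four uint16_t values. The tail paths move one 64-bit pixel at a time
    // using the double-precision load/store intrinsics purely as 64-bit moves, and the unpack
    // sequences transpose interleaved rgba,rgba,... into planar r,g,b,a vectors.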
    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        __m128i _01, _23, _45, _67;
        if (__builtin_expect(tail,0)) {
            auto src = (const double*)ptr;
            _01 = _23 = _45 = _67 = _mm_setzero_si128();
            if (tail > 0) { _01 = _mm_loadl_pd(_01, src+0); }
            if (tail > 1) { _01 = _mm_loadh_pd(_01, src+1); }
            if (tail > 2) { _23 = _mm_loadl_pd(_23, src+2); }
            if (tail > 3) { _23 = _mm_loadh_pd(_23, src+3); }
            if (tail > 4) { _45 = _mm_loadl_pd(_45, src+4); }
            if (tail > 5) { _45 = _mm_loadh_pd(_45, src+5); }
            if (tail > 6) { _67 = _mm_loadl_pd(_67, src+6); }
        } else {
            _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);
            _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);
            _45 = _mm_loadu_si128(((__m128i*)ptr) + 2);
            _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
        }

        auto _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
             _13 = _mm_unpackhi_epi16(_01, _23),  // r1 r3 g1 g3 b1 b3 a1 a3
             _46 = _mm_unpacklo_epi16(_45, _67),
             _57 = _mm_unpackhi_epi16(_45, _67);

        auto rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
             ba0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 a0 a1 a2 a3
             rg4567 = _mm_unpacklo_epi16(_46, _57),
             ba4567 = _mm_unpackhi_epi16(_46, _57);

        *r = _mm_unpacklo_epi64(rg0123, rg4567);
        *g = _mm_unpackhi_epi64(rg0123, rg4567);
        *b = _mm_unpacklo_epi64(ba0123, ba4567);
        *a = _mm_unpackhi_epi64(ba0123, ba4567);
    }
    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        auto rg0123 = _mm_unpacklo_epi16(r, g),  // r0 g0 r1 g1 r2 g2 r3 g3
             rg4567 = _mm_unpackhi_epi16(r, g),  // r4 g4 r5 g5 r6 g6 r7 g7
             ba0123 = _mm_unpacklo_epi16(b, a),
             ba4567 = _mm_unpackhi_epi16(b, a);

        auto _01 = _mm_unpacklo_epi32(rg0123, ba0123),
             _23 = _mm_unpackhi_epi32(rg0123, ba0123),
             _45 = _mm_unpacklo_epi32(rg4567, ba4567),
             _67 = _mm_unpackhi_epi32(rg4567, ba4567);

        if (__builtin_expect(tail,0)) {
            auto dst = (double*)ptr;
            if (tail > 0) { _mm_storel_pd(dst+0, _01); }
            if (tail > 1) { _mm_storeh_pd(dst+1, _01); }
            if (tail > 2) { _mm_storel_pd(dst+2, _23); }
            if (tail > 3) { _mm_storeh_pd(dst+3, _23); }
            if (tail > 4) { _mm_storel_pd(dst+4, _45); }
            if (tail > 5) { _mm_storeh_pd(dst+5, _45); }
            if (tail > 6) { _mm_storel_pd(dst+6, _67); }
        } else {
            _mm_storeu_si128((__m128i*)ptr + 0, _01);
            _mm_storeu_si128((__m128i*)ptr + 1, _23);
            _mm_storeu_si128((__m128i*)ptr + 2, _45);
            _mm_storeu_si128((__m128i*)ptr + 3, _67);
        }
    }

    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        F _04, _15, _26, _37;

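        // Deliberate fall-through: tail == 0 loads all 8 pixels, while a non-zero tail starts
        // partway down and loads only the first `tail` pixels; the other lanes stay uninitialized
        // and are never consumed.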
        switch (tail) {
            case 0: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+28), 1);
            case 7: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+24), 1);
            case 6: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+20), 1);
            case 5: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+16), 1);
            case 4: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+12), 0);
            case 3: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+ 8), 0);
            case 2: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+ 4), 0);
            case 1: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+ 0), 0);
        }

        F rg0145 = _mm256_unpacklo_ps(_04,_15),  // r0 r1 g0 g1 | r4 r5 g4 g5
          ba0145 = _mm256_unpackhi_ps(_04,_15),
          rg2367 = _mm256_unpacklo_ps(_26,_37),
          ba2367 = _mm256_unpackhi_ps(_26,_37);

        *r = _mm256_unpacklo_pd(rg0145, rg2367);
        *g = _mm256_unpackhi_pd(rg0145, rg2367);
        *b = _mm256_unpacklo_pd(ba0145, ba2367);
        *a = _mm256_unpackhi_pd(ba0145, ba2367);
    }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        F rg0145 = _mm256_unpacklo_ps(r, g),  // r0 g0 r1 g1 | r4 g4 r5 g5
          rg2367 = _mm256_unpackhi_ps(r, g),  // r2 ...      | r6 ...
          ba0145 = _mm256_unpacklo_ps(b, a),  // b0 a0 b1 a1 | b4 a4 b5 a5
          ba2367 = _mm256_unpackhi_ps(b, a);  // b2 ...      | b6 ...

        F _04 = _mm256_unpacklo_pd(rg0145, ba0145),  // r0 g0 b0 a0 | r4 g4 b4 a4
          _15 = _mm256_unpackhi_pd(rg0145, ba0145),  // r1 ...      | r5 ...
          _26 = _mm256_unpacklo_pd(rg2367, ba2367),  // r2 ...      | r6 ...
          _37 = _mm256_unpackhi_pd(rg2367, ba2367);  // r3 ...      | r7 ...

        if (__builtin_expect(tail, 0)) {
            if (tail > 0) { _mm_storeu_ps(ptr+ 0, _mm256_extractf128_ps(_04, 0)); }
            if (tail > 1) { _mm_storeu_ps(ptr+ 4, _mm256_extractf128_ps(_15, 0)); }
            if (tail > 2) { _mm_storeu_ps(ptr+ 8, _mm256_extractf128_ps(_26, 0)); }
            if (tail > 3) { _mm_storeu_ps(ptr+12, _mm256_extractf128_ps(_37, 0)); }
            if (tail > 4) { _mm_storeu_ps(ptr+16, _mm256_extractf128_ps(_04, 1)); }
            if (tail > 5) { _mm_storeu_ps(ptr+20, _mm256_extractf128_ps(_15, 1)); }
            if (tail > 6) { _mm_storeu_ps(ptr+24, _mm256_extractf128_ps(_26, 1)); }
        } else {
            F _01 = _mm256_permute2f128_ps(_04, _15, 32),  // 32 == 0010 0000 == lo, lo
              _23 = _mm256_permute2f128_ps(_26, _37, 32),
              _45 = _mm256_permute2f128_ps(_04, _15, 49),  // 49 == 0011 0001 == hi, hi
              _67 = _mm256_permute2f128_ps(_26, _37, 49);
            _mm256_storeu_ps(ptr+ 0, _01);
            _mm256_storeu_ps(ptr+ 8, _23);
            _mm256_storeu_ps(ptr+16, _45);
            _mm256_storeu_ps(ptr+24, _67);
        }
    }

    SI F from_half(U16 h) {
    #if defined(__AVX2__)
        return _mm256_cvtph_ps(h);
    #else
        // This technique would slow down ~10x for denorm inputs, so we flush them to zero.
        // With a signed comparison this conveniently also flushes negative half floats to zero.
        h = _mm_andnot_si128(_mm_cmplt_epi16(h, _mm_set1_epi32(0x04000400_i)), h);

        U32 w = _mm256_setr_m128i(_mm_unpacklo_epi16(h, _mm_setzero_si128()),
                                  _mm_unpackhi_epi16(h, _mm_setzero_si128()));
        return bit_cast<F>(w << 13)             // Line up the mantissa,
             * bit_cast<F>(U32(0x77800000_i));  // then fix up the exponent.
    #endif
    }
    SI U16 to_half(F f) {
    #if defined(__AVX2__)
        return _mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
    #else
        return pack(bit_cast<U32>(f * bit_cast<F>(U32(0x07800000_i)))  // Fix up the exponent,
                 >> 13);                                               // then line up the mantissa.
    #endif
    }
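    // Assumption: the offline __AVX2__ build also enables F16C, which _mm256_cvtph_ps() and
    // _mm256_cvtps_ph() require; the non-AVX2 branches fall back to the same bit-fiddling used
    // in the other paths.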

#elif defined(__SSE2__)
    #include <immintrin.h>

    template <typename T> using V = T __attribute__((ext_vector_type(4)));
    using F   = V<float   >;
    using I32 = V< int32_t>;
    using U64 = V<uint64_t>;
    using U32 = V<uint32_t>;
    using U16 = V<uint16_t>;
    using U8  = V<uint8_t >;

    SI F   mad(F f, F m, F a)  { return f*m+a;              }
    SI F   min(F a, F b)       { return _mm_min_ps(a,b);    }
    SI F   max(F a, F b)       { return _mm_max_ps(a,b);    }
    SI F   abs_(F v)           { return _mm_and_ps(v, 0-v); }
    SI F   rcp  (F v)          { return _mm_rcp_ps  (v);    }
    SI F   rsqrt(F v)          { return _mm_rsqrt_ps(v);    }
    SI U32 round(F v, F scale) { return _mm_cvtps_epi32(v*scale); }

    SI U16 pack(U32 v) {
    #if defined(__SSE4_1__)
        auto p = _mm_packus_epi32(v,v);
    #else
        // Sign extend so that _mm_packs_epi32() does the pack we want.
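        // (Each lane already fits in 16 bits, so sign-extending its low half lets the
        //  signed-saturating pack reproduce those 16 bits exactly.)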
        auto p = _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);
        p = _mm_packs_epi32(p,p);
    #endif
        return unaligned_load<U16>(&p);  // We have two copies. Return (the lower) one.
    }
    SI U8 pack(U16 v) {
        auto r = widen_cast<__m128i>(v);
        r = _mm_packus_epi16(r,r);
        return unaligned_load<U8>(&r);
    }

    SI F if_then_else(I32 c, F t, F e) {
        return _mm_or_ps(_mm_and_ps(c, t), _mm_andnot_ps(c, e));
    }

    SI F floor_(F v) {
    #if defined(__SSE4_1__)
        return _mm_floor_ps(v);
    #else
        F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
        return roundtrip - if_then_else(roundtrip > v, 1.0_f, 0);
    #endif
    }

    template <typename T>
    SI V<T> gather(const T* p, U32 ix) {
        return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
    }

    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        auto _01 = _mm_loadu_si128(((__m128i*)ptr) + 0),
             _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);

        auto _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
             _13 = _mm_unpackhi_epi16(_01, _23);  // r1 r3 g1 g3 b1 b3 a1 a3

        auto rg = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
             ba = _mm_unpackhi_epi16(_02, _13);  // b0 b1 b2 b3 a0 a1 a2 a3

        *r = unaligned_load<U16>((uint16_t*)&rg + 0);
        *g = unaligned_load<U16>((uint16_t*)&rg + 4);
        *b = unaligned_load<U16>((uint16_t*)&ba + 0);
        *a = unaligned_load<U16>((uint16_t*)&ba + 4);
    }
    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)),
             ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a));
        _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));
        _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));
    }

    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        auto _0 = _mm_loadu_ps(ptr+ 0),
             _1 = _mm_loadu_ps(ptr+ 4),
             _2 = _mm_loadu_ps(ptr+ 8),
             _3 = _mm_loadu_ps(ptr+12);
        _MM_TRANSPOSE4_PS(_0,_1,_2,_3);
        *r = _0;
        *g = _1;
        *b = _2;
        *a = _3;
    }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        _MM_TRANSPOSE4_PS(r,g,b,a);
        _mm_storeu_ps(ptr+ 0, r);
        _mm_storeu_ps(ptr+ 4, g);
        _mm_storeu_ps(ptr+ 8, b);
        _mm_storeu_ps(ptr+12, a);
    }

    SI F from_half(U16 h) {
        auto v = widen_cast<__m128i>(h);

        // Same deal as AVX: flush denorms and negatives to zero.
        v = _mm_andnot_si128(_mm_cmplt_epi16(v, _mm_set1_epi32(0x04000400_i)), v);

        U32 w = _mm_unpacklo_epi16(v, _mm_setzero_si128());
        return bit_cast<F>(w << 13)             // Line up the mantissa,
             * bit_cast<F>(U32(0x77800000_i));  // then fix up the exponent.
    }
    SI U16 to_half(F f) {
        return pack(bit_cast<U32>(f * bit_cast<F>(U32(0x07800000_i)))  // Fix up the exponent,
                 >> 13);                                               // then line up the mantissa.
    }
#endif

// We need to be careful with casts.
// (F)x means cast x to float in the portable path, but bit_cast x to float in the others.
// These named casts and bit_cast() are always what they seem to be.
#if defined(JUMPER)
    SI F   cast  (U32 v) { return      __builtin_convertvector((I32)v,   F); }
    SI U32 trunc_(F   v) { return (U32)__builtin_convertvector(     v, I32); }
    SI U32 expand(U16 v) { return      __builtin_convertvector(     v, U32); }
    SI U32 expand(U8  v) { return      __builtin_convertvector(     v, U32); }
#else
    SI F   cast  (U32 v) { return   (F)v; }
    SI U32 trunc_(F   v) { return (U32)v; }
    SI U32 expand(U16 v) { return (U32)v; }
    SI U32 expand(U8  v) { return (U32)v; }
#endif
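// For example, cast() turns the lane value 1 into 1.0f, while a raw (F) cast of a U32 vector in a
// JUMPER build is a Clang vector cast that merely reinterprets the bits (1 becomes a denormal);
// the named helpers make the intended conversion explicit in every path.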

SI U16 bswap(U16 x) {
#if defined(JUMPER) && defined(__SSE2__) && !defined(__AVX__)
    // Somewhat inexplicably Clang decides to do (x<<8) | (x>>8) in 32-bit lanes
    // when generating code for SSE2 and SSE4.1. We'll do it manually...
    auto v = widen_cast<__m128i>(x);
    v = _mm_slli_epi16(v,8) | _mm_srli_epi16(v,8);
    return unaligned_load<U16>(&v);
#else
    return (x<<8) | (x>>8);
#endif
}

#endif//SkJumper_vectors_DEFINED