#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_
#define _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_

#if __cplusplus >= 201703L

_GLIBCXX_SIMD_BEGIN_NAMESPACE
template <typename _V, typename = _VectorTraits<_V>>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_allbits
    = reinterpret_cast<_V>(~__vector_type_t<char, sizeof(_V) / sizeof(char)>());
template <typename _V, typename = _VectorTraits<_V>>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_signmask
    = __xor(_V() + 1, _V() - 1);
template <typename _V, typename = _VectorTraits<_V>>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_absmask
    = __andnot(_S_signmask<_V>, _S_allbits<_V>);
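
// Illustrative note (comment only, not upstream text): for
// _V = __vector_type_t<float, 4>, _S_signmask<_V> has only the sign bit set
// in every element (0x80000000) and _S_absmask<_V> is its complement
// (0x7fffffff), so __and(__x, _S_absmask<_V>) computes an elementwise fabs.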
// An _Indices value of -1 zeros the corresponding output element.
template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
  __vector_permute(_Tp __x)
  {
    static_assert(sizeof...(_Indices) == _TVT::_S_full_size);
    return __make_vector<typename _TVT::value_type>(
      (_Indices == -1 ? 0 : __x[_Indices == -1 ? 0 : _Indices])...);
  }
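
// Example (illustrative comment only): reversing a 4-element vector while
// zeroing the last output lane:
//   __vector_type_t<int, 4> __v = {1, 2, 3, 4};
//   __vector_permute<3, 2, 1, -1>(__v); // yields {4, 3, 2, 0}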
template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
  __vector_shuffle(_Tp __x, _Tp __y)
  {
    return _Tp{(_Indices == -1 ? 0
                : _Indices < _TVT::_S_full_size
                    ? __x[_Indices]
                    : __y[_Indices - _TVT::_S_full_size])...};
  }
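
// Example (illustrative comment only): indices below _S_full_size select
// from __x, larger indices select from __y. Interleaving the low halves of
// two 4-element vectors:
//   __vector_shuffle<0, 4, 1, 5>(__x, __y); // {__x[0], __y[0], __x[1], __y[1]}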
template <typename _Tp, typename... _Args>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, sizeof...(_Args)>
  __make_wrapper(const _Args&... __args)
  { return __make_vector<_Tp>(__args...); }
template <typename _Tp, size_t _ToN = 0, typename _Up, size_t _M,
          size_t _Np = _ToN != 0 ? _ToN : sizeof(_Up) * _M / sizeof(_Tp)>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, _Np>
  __wrapper_bitcast(_SimdWrapper<_Up, _M> __x)
  {
    static_assert(_Np > 1);
    return __intrin_bitcast<__vector_type_t<_Tp, _Np>>(__x._M_data);
  }
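
// Example (illustrative comment only): a _SimdWrapper<short, 8> (16 Bytes)
// reinterprets as _SimdWrapper<int, 4>; with _ToN left at 0, _Np is deduced
// as sizeof(short) * 8 / sizeof(int) == 4.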
// __shift_elements_right: shifts the vector contents __shift Bytes towards
// element 0, shifting in zeros from the most significant end.
template <unsigned __shift, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _GLIBCXX_SIMD_INTRINSIC _Tp
  __shift_elements_right(_Tp __v)
  {
    [[maybe_unused]] const auto __iv = __to_intrin(__v);
    static_assert(__shift <= sizeof(_Tp));
    if constexpr (__shift == 0)
      return __v;
    else if constexpr (__shift == sizeof(_Tp))
      return _Tp();
#if _GLIBCXX_SIMD_X86INTRIN
    else if constexpr (__have_sse && __shift == 8
                       && _TVT::template _S_is<float, 4>)
      return _mm_movehl_ps(__iv, __iv);
    else if constexpr (__have_sse2 && __shift == 8
                       && _TVT::template _S_is<double, 2>)
      return _mm_unpackhi_pd(__iv, __iv);
    else if constexpr (__have_sse2 && sizeof(_Tp) == 16)
      return reinterpret_cast<typename _TVT::type>(
        _mm_srli_si128(reinterpret_cast<__m128i>(__iv), __shift));
    else if constexpr (__shift == 16 && sizeof(_Tp) == 32)
      return __zero_extend(__hi128(__v));
    else if constexpr (__have_avx2 && sizeof(_Tp) == 32 && __shift < 16)
      {
        const auto __vll = __vector_bitcast<_LLong>(__v);
        return reinterpret_cast<typename _TVT::type>(
          _mm256_alignr_epi8(_mm256_permute2x128_si256(__vll, __vll, 0x81),
                             __vll, __shift));
      }
    else if constexpr (__have_avx && sizeof(_Tp) == 32 && __shift < 16)
      {
        const auto __vll = __vector_bitcast<_LLong>(__v);
        return reinterpret_cast<typename _TVT::type>(
          __concat(_mm_alignr_epi8(__hi128(__vll), __lo128(__vll), __shift),
                   _mm_srli_si128(__hi128(__vll), __shift)));
      }
    else if constexpr (sizeof(_Tp) == 32 && __shift > 16)
      return __zero_extend(__shift_elements_right<__shift - 16>(__hi128(__v)));
    else if constexpr (sizeof(_Tp) == 64 && __shift == 32)
      return __zero_extend(__hi256(__v));
    else if constexpr (__have_avx512f && sizeof(_Tp) == 64)
      {
        if constexpr (__shift >= 48)
          return __zero_extend(
            __shift_elements_right<__shift - 48>(__extract<3, 4>(__v)));
        else if constexpr (__shift >= 32)
          return __zero_extend(
            __shift_elements_right<__shift - 32>(__hi256(__v)));
        else if constexpr (__shift % 8 == 0)
          return reinterpret_cast<typename _TVT::type>(
            _mm512_alignr_epi64(__m512i(), __intrin_bitcast<__m512i>(__v),
                                __shift / 8));
        else if constexpr (__shift % 4 == 0)
          return reinterpret_cast<typename _TVT::type>(
            _mm512_alignr_epi32(__m512i(), __intrin_bitcast<__m512i>(__v),
                                __shift / 4));
        else if constexpr (__have_avx512bw && __shift < 16)
          {
            const auto __vll = __vector_bitcast<_LLong>(__v);
            return reinterpret_cast<typename _TVT::type>(
              _mm512_alignr_epi8(_mm512_shuffle_i32x4(__vll, __vll, 0xf9),
                                 __vll, __shift));
          }
        else if constexpr (__have_avx512bw && __shift < 32)
          {
            const auto __vll = __vector_bitcast<_LLong>(__v);
            return reinterpret_cast<typename _TVT::type>(
              _mm512_alignr_epi8(_mm512_shuffle_i32x4(__vll, __m512i(), 0xee),
                                 _mm512_shuffle_i32x4(__vll, __vll, 0xf9),
                                 __shift - 16));
          }
        else
          __assert_unreachable<_Tp>();
      }
#endif // _GLIBCXX_SIMD_X86INTRIN
    else
      {
        constexpr int __chunksize = __shift % 8 == 0   ? 8
                                    : __shift % 4 == 0 ? 4
                                    : __shift % 2 == 0 ? 2
                                                       : 1;
        auto __w = __vector_bitcast<__int_with_sizeof_t<__chunksize>>(__v);
        using _Up = decltype(__w);
        return __intrin_bitcast<_Tp>(
          __call_with_n_evaluations<(sizeof(_Tp) - __shift) / __chunksize>(
            [](auto... __chunks) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              return _Up{__chunks...};
            }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              return __w[__shift / __chunksize + __i];
            }));
      }
  }
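
// Example (illustrative comment only): __shift counts Bytes, not elements.
// For a 16-Byte vector of 4 ints, shifting by one element is
//   __shift_elements_right<sizeof(int)>(__v); // {__v[1], __v[2], __v[3], 0}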
// __extract_part(_SimdWrapper<_Tp, _Np>)
template <int _Index, int _Total, int _Combine, typename _Tp, size_t _Np>
  _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST constexpr
  _SimdWrapper<_Tp, _Np / _Total * _Combine>
  __extract_part(const _SimdWrapper<_Tp, _Np> __x)
  {
    if constexpr (_Index % 2 == 0 && _Total % 2 == 0 && _Combine % 2 == 0)
      return __extract_part<_Index / 2, _Total / 2, _Combine / 2>(__x);
    else
      {
        constexpr size_t __values_per_part = _Np / _Total;
        constexpr size_t __values_to_skip = _Index * __values_per_part;
        constexpr size_t __return_size = __values_per_part * _Combine;
        using _R = __vector_type_t<_Tp, __return_size>;
        static_assert((_Index + _Combine) * __values_per_part * sizeof(_Tp)
                        <= sizeof(__x),
                      "out of bounds __extract_part");
        if (__x._M_is_constprop())
          return __generate_from_n_evaluations<__return_size, _R>(
                   [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                     return __x[__values_to_skip + __i];
                   });
        if constexpr (_Index == 0 && _Total == 1)
          return __x;
        else if constexpr (_Index == 0)
          return __intrin_bitcast<_R>(__as_vector(__x));
#if _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (sizeof(__x) == 32
                           && __return_size * sizeof(_Tp) <= 16)
          {
            constexpr size_t __bytes_to_skip = __values_to_skip * sizeof(_Tp);
            if constexpr (__bytes_to_skip == 16)
              return __vector_bitcast<_Tp, __return_size>(
                       __hi128(__as_vector(__x)));
            else
              return __vector_bitcast<_Tp, __return_size>(
                       _mm_alignr_epi8(__hi128(__vector_bitcast<_LLong>(__x)),
                                       __lo128(__vector_bitcast<_LLong>(__x)),
                                       __bytes_to_skip));
          }
#endif
        else if constexpr (_Index > 0
                           && (__values_to_skip % __return_size != 0
                               || sizeof(_R) >= 8)
                           && (__values_to_skip + __return_size) * sizeof(_Tp)
                                <= 64
                           && sizeof(__x) >= 16)
          return __intrin_bitcast<_R>(
                   __shift_elements_right<__values_to_skip * sizeof(_Tp)>(
                     __as_vector(__x)));
        else
          {
            _R __r = {};
            __builtin_memcpy(&__r,
                             reinterpret_cast<const char*>(&__x)
                               + sizeof(_Tp) * __values_to_skip,
                             __return_size * sizeof(_Tp));
            return __r;
          }
      }
  }
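
// Example (illustrative comment only): splitting a 16-element wrapper into
// quarters, __extract_part<1, 4>(__x) returns elements [4, 8) as a
// _SimdWrapper<_Tp, 4>; _Combine > 1 widens the result, e.g.
// __extract_part<2, 4, 2>(__x) returns elements [8, 16).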
// __extract_part(_SimdWrapper<bool, _Np>)
template <int _Index, int _Total, int _Combine = 1, size_t _Np>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<bool, _Np / _Total * _Combine>
  __extract_part(const _SimdWrapper<bool, _Np> __x)
  {
    static_assert(_Combine == 1, "_Combine != 1 not implemented");
    static_assert(__have_avx512f && _Np == _Np);
    static_assert(_Total >= 2 && _Index + _Combine <= _Total && _Index >= 0);
    return __x._M_data >> (_Index * _Np / _Total);
  }
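
// Example (illustrative comment only): for a 16-bit mask, _Index = 1 and
// _Total = 2 shift the high 8 mask bits down: 0xab00 -> 0x00ab. The
// `_Np == _Np` operand merely makes the static_assert type-dependent, so it
// is only evaluated on instantiation.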
template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])...};
  }
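
// The overloads below repeat this elementwise static_cast pattern for 2 to 16
// input vectors: all selected elements of __a are converted before those of
// __b, and so on, concatenating into the single destination vector _To.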
template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n, _From __o,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...,
               static_cast<_Tp>(__o[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n, _From __o,
                   _From __p, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...,
               static_cast<_Tp>(__o[_I])..., static_cast<_Tp>(__p[_I])...};
  }
template <typename _To, typename... _From, size_t _FromSize>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_SimdWrapper<_From, _FromSize>... __xs)
  {
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
    using _From0 = __first_of_pack_t<_From...>;
    using _FW = _SimdWrapper<_From0, _FromSize>;
    if (!_FW::_S_is_partial && !(... && __xs._M_is_constprop()))
      {
        if constexpr ((sizeof...(_From) & (sizeof...(_From) - 1))
                      == 0) // power-of-two number of arguments
          return __convert_x86<_To>(__as_vector(__xs)...);
        else // append zeros and recurse until the above branch is taken
          return __vector_convert<_To>(__xs..., _FW{});
      }
    else
#endif
      return __vector_convert<_To>(
               __as_vector(__xs)...,
               make_index_sequence<(sizeof...(__xs) == 1
                                      ? std::min(_VectorTraits<_To>::_S_full_size,
                                                 int(_FromSize))
                                      : _FromSize)>());
  }
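
// Usage sketch (illustrative comment only): the index sequence bounds how
// many elements are read from each input. With a single input,
// min(full output size, _FromSize) elements are converted, e.g.
//   __vector_convert<__vector_type_t<double, 4>>(_SimdWrapper<float, 4>(__x))
// converts all four floats. Under the PR85048 workaround, full wrappers are
// instead dispatched to __convert_x86, which can use dedicated conversion
// instructions where available.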
// __convert function
template <typename _To, typename _From, typename... _More>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __convert(_From __v0, _More... __vs)
  {
    static_assert((true && ... && is_same_v<_From, _More>));
    if constexpr (__is_vectorizable_v<_From>)
      {
        using _V = typename _VectorTraits<_To>::type;
        using _Tp = typename _VectorTraits<_To>::value_type;
        return _V{static_cast<_Tp>(__v0), static_cast<_Tp>(__vs)...};
      }
    else if constexpr (__is_vector_type_v<_From>)
      return __convert<_To>(__as_wrapper(__v0), __as_wrapper(__vs)...);
    else // _SimdWrapper arguments
      {
        constexpr size_t __input_size = _From::_S_size * (1 + sizeof...(_More));
        if constexpr (__is_vectorizable_v<_To>)
          return __convert<__vector_type_t<_To, __input_size>>(__v0, __vs...);
        else if constexpr (!__is_vector_type_v<_To>)
          return _To(__convert<typename _To::_BuiltinType>(__v0, __vs...));
        else
          {
            static_assert(
              sizeof...(_More) == 0
                || _VectorTraits<_To>::_S_full_size >= __input_size,
              "__convert(...) requires the input to fit into the output");
            return __vector_convert<_To>(__v0, __vs...);
          }
      }
  }
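
// Usage sketch (illustrative comment only):
//   __convert<__vector_type_t<int, 4>>(__some_float_wrapper)
// converts a whole wrapper in one call, whereas passing several wrappers
// concatenates them; the static_assert above rejects outputs with fewer
// elements than the combined input.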
// __convert_all: converts __v into array<_To, N>, where N is _NParts if
// non-zero or otherwise deduced from _To such that
// N * #elements(_To) <= #elements(__v).
template <typename _To,
          size_t _NParts = 0, // allows converting fewer or more (only the
                              // last _To is partially filled) than all
          size_t _Offset = 0, // where to start, # of elements (not Bytes or
                              // Parts)
          typename _From, typename _FromVT = _VectorTraits<_From>>
  _GLIBCXX_SIMD_INTRINSIC auto
  __convert_all(_From __v)
  {
    if constexpr (is_arithmetic_v<_To> && _NParts != 1)
      {
        static_assert(_Offset < _FromVT::_S_full_size);
        constexpr auto _Np
          = _NParts == 0 ? _FromVT::_S_partial_width - _Offset : _NParts;
        return __generate_from_n_evaluations<_Np, array<_To, _Np>>(
                 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   return static_cast<_To>(__v[__i + _Offset]);
                 });
      }
    else
      {
        static_assert(__is_vector_type_v<_To>);
        using _ToVT = _VectorTraits<_To>;
        if constexpr (__is_vector_type_v<_From>)
          return __convert_all<_To, _NParts>(__as_wrapper(__v));
        else if constexpr (_NParts == 1)
          {
            static_assert(_Offset % _ToVT::_S_full_size == 0);
            return array<_To, 1>{__vector_convert<_To>(
              __extract_part<_Offset / _ToVT::_S_full_size,
                             __div_roundup(_FromVT::_S_partial_width,
                                           _ToVT::_S_full_size)>(__v))};
          }
#if _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (!__have_sse4_1 && _Offset == 0
                           && is_integral_v<typename _FromVT::value_type>
                           && sizeof(typename _FromVT::value_type)
                                < sizeof(typename _ToVT::value_type)
                           && !(sizeof(typename _FromVT::value_type) == 4
                                && is_same_v<typename _ToVT::value_type,
                                             double>))
          {
            using _ToT = typename _ToVT::value_type;
            using _FromT = typename _FromVT::value_type;
            constexpr size_t _Np
              = _NParts != 0
                  ? _NParts
                  : (_FromVT::_S_partial_width / _ToVT::_S_full_size);
            using _R = array<_To, _Np>;
            // __adjust resizes its input to _Np entries (via _SizeConstant)
            // so that no unnecessary intermediate conversions are requested
            // and, more importantly, no intermediate conversions are missing
            [[maybe_unused]] auto __adjust
              = [](auto __n,
                   auto __vv) -> _SimdWrapper<_FromT, decltype(__n)::value> {
              return __vector_bitcast<_FromT, decltype(__n)::value>(__vv);
            };
            [[maybe_unused]] const auto __vi = __to_intrin(__v);
            auto&& __make_array
              = [](auto __x0, [[maybe_unused]] auto __x1) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              if constexpr (_Np == 1)
                return _R{__intrin_bitcast<_To>(__x0)};
              else
                return _R{__intrin_bitcast<_To>(__x0),
                          __intrin_bitcast<_To>(__x1)};
            };

            if constexpr (_Np == 0)
              return _R{};
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) == 2)
              {
                static_assert(is_integral_v<_FromT>);
                static_assert(is_integral_v<_ToT>);
                if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi8(__vi, __m128i()),
                                      _mm_unpackhi_epi8(__vi, __m128i()));
                else
                  return __make_array(
                           _mm_srai_epi16(_mm_unpacklo_epi8(__vi, __vi), 8),
                           _mm_srai_epi16(_mm_unpackhi_epi8(__vi, __vi), 8));
              }
            else if constexpr (sizeof(_FromT) == 2 && sizeof(_ToT) == 4)
              {
                static_assert(is_integral_v<_FromT>);
                if constexpr (is_floating_point_v<_ToT>)
                  {
                    const auto __ints
                      = __convert_all<__vector_type16_t<int>, _Np>(
                          __adjust(_SizeConstant<_Np * 4>(), __v));
                    return __generate_from_n_evaluations<_Np, _R>(
                             [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                               return __vector_convert<_To>(
                                        __as_wrapper(__ints[__i]));
                             });
                  }
                else if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi16(__vi, __m128i()),
                                      _mm_unpackhi_epi16(__vi, __m128i()));
                else
                  return __make_array(
                           _mm_srai_epi32(_mm_unpacklo_epi16(__vi, __vi), 16),
                           _mm_srai_epi32(_mm_unpackhi_epi16(__vi, __vi), 16));
              }
            else if constexpr (sizeof(_FromT) == 4 && sizeof(_ToT) == 8
                               && is_integral_v<_FromT> && is_integral_v<_ToT>)
              {
                if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi32(__vi, __m128i()),
                                      _mm_unpackhi_epi32(__vi, __m128i()));
                else
                  return __make_array(
                           _mm_unpacklo_epi32(__vi, _mm_srai_epi32(__vi, 31)),
                           _mm_unpackhi_epi32(__vi, _mm_srai_epi32(__vi, 31)));
              }
            // NOTE: this condition repeats the preceding branch verbatim, so
            // the branch below is never taken.
            else if constexpr (sizeof(_FromT) == 4 && sizeof(_ToT) == 8
                               && is_integral_v<_FromT> && is_integral_v<_ToT>)
              {
                if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi32(__vi, __m128i()),
                                      _mm_unpackhi_epi32(__vi, __m128i()));
                else
                  return __make_array(
                           _mm_unpacklo_epi32(__vi, _mm_srai_epi32(__vi, 31)),
                           _mm_unpackhi_epi32(__vi, _mm_srai_epi32(__vi, 31)));
              }
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) >= 4
                               && is_signed_v<_FromT>)
              {
                const __m128i __vv[2] = {_mm_unpacklo_epi8(__vi, __vi),
                                         _mm_unpackhi_epi8(__vi, __vi)};
                const __vector_type_t<int, 4> __vvvv[4] = {
                  __vector_bitcast<int>(_mm_unpacklo_epi16(__vv[0], __vv[0])),
                  __vector_bitcast<int>(_mm_unpackhi_epi16(__vv[0], __vv[0])),
                  __vector_bitcast<int>(_mm_unpacklo_epi16(__vv[1], __vv[1])),
                  __vector_bitcast<int>(_mm_unpackhi_epi16(__vv[1], __vv[1]))};
                if constexpr (sizeof(_ToT) == 4)
                  return __generate_from_n_evaluations<_Np, _R>(
                           [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                             return __vector_convert<_To>(
                                      _SimdWrapper<int, 4>(__vvvv[__i] >> 24));
                           });
                else if constexpr (is_integral_v<_ToT>)
                  return __generate_from_n_evaluations<_Np, _R>(
                           [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                             const auto __signbits
                               = __to_intrin(__vvvv[__i / 2] >> 31);
                             const auto __sx32
                               = __to_intrin(__vvvv[__i / 2] >> 24);
                             return __vector_bitcast<_ToT>(
                                      __i % 2 == 0
                                        ? _mm_unpacklo_epi32(__sx32, __signbits)
                                        : _mm_unpackhi_epi32(__sx32,
                                                             __signbits));
                           });
                else
                  return __generate_from_n_evaluations<_Np, _R>(
                           [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                             const _SimdWrapper<int, 4> __int4
                               = __vvvv[__i / 2] >> 24;
                             return __vector_convert<_To>(
                                      __i % 2 == 0
                                        ? __int4
                                        : _SimdWrapper<int, 4>(
                                            _mm_unpackhi_epi64(
                                              __to_intrin(__int4),
                                              __to_intrin(__int4))));
                           });
              }
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) == 4)
              {
                const auto __shorts = __convert_all<__vector_type16_t<
                  conditional_t<is_signed_v<_FromT>, short, unsigned short>>>(
                  __adjust(_SizeConstant<(_Np + 1) / 2 * 8>(), __v));
                return __generate_from_n_evaluations<_Np, _R>(
                         [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                           return __convert_all<_To>(__shorts[__i / 2])[__i % 2];
                         });
              }
            else if constexpr (sizeof(_FromT) == 2 && sizeof(_ToT) == 8
                               && is_signed_v<_FromT> && is_integral_v<_ToT>)
              {
                const __m128i __vv[2] = {_mm_unpacklo_epi16(__vi, __vi),
                                         _mm_unpackhi_epi16(__vi, __vi)};
                const __vector_type16_t<int> __vvvv[4]
                  = {__vector_bitcast<int>(
                       _mm_unpacklo_epi32(_mm_srai_epi32(__vv[0], 16),
                                          _mm_srai_epi32(__vv[0], 31))),
                     __vector_bitcast<int>(
                       _mm_unpackhi_epi32(_mm_srai_epi32(__vv[0], 16),
                                          _mm_srai_epi32(__vv[0], 31))),
                     __vector_bitcast<int>(
                       _mm_unpacklo_epi32(_mm_srai_epi32(__vv[1], 16),
                                          _mm_srai_epi32(__vv[1], 31))),
                     __vector_bitcast<int>(
                       _mm_unpackhi_epi32(_mm_srai_epi32(__vv[1], 16),
                                          _mm_srai_epi32(__vv[1], 31)))};
                return __generate_from_n_evaluations<_Np, _R>(
                         [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                           return __vector_bitcast<_ToT>(__vvvv[__i]);
                         });
              }
            else if constexpr (sizeof(_FromT) <= 2 && sizeof(_ToT) == 8)
              {
                const auto __ints
                  = __convert_all<__vector_type16_t<conditional_t<
                      is_signed_v<_FromT> || is_floating_point_v<_ToT>, int,
                      unsigned int>>>(
                      __adjust(_SizeConstant<(_Np + 1) / 2 * 4>(), __v));
                return __generate_from_n_evaluations<_Np, _R>(
                         [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                           return __convert_all<_To>(__ints[__i / 2])[__i % 2];
                         });
              }
            else
              __assert_unreachable<_To>();
          }
#endif // _GLIBCXX_SIMD_X86INTRIN
        else if constexpr ((_FromVT::_S_partial_width - _Offset)
                           > _ToVT::_S_full_size)
          {
            constexpr size_t _NTotal
              = (_FromVT::_S_partial_width - _Offset) / _ToVT::_S_full_size;
            constexpr size_t _Np = _NParts == 0 ? _NTotal : _NParts;
            static_assert(_Np <= _NTotal
                            || (_Np == _NTotal + 1
                                && (_FromVT::_S_partial_width - _Offset)
                                     % _ToVT::_S_full_size > 0));
            using _R = array<_To, _Np>;
            if constexpr (_Np == 1)
              return _R{__vector_convert<_To>(
                __extract_part<_Offset, _FromVT::_S_partial_width,
                               _ToVT::_S_full_size>(__v))};
            else
              return __generate_from_n_evaluations<_Np, _R>(
                       [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                         auto __part
                           = __extract_part<__i * _ToVT::_S_full_size + _Offset,
                                            _FromVT::_S_partial_width,
                                            _ToVT::_S_full_size>(__v);
                         return __vector_convert<_To>(__part);
                       });
          }
        else if constexpr (_Offset == 0)
          return array<_To, 1>{__vector_convert<_To>(__v)};
        else
          return array<_To, 1>{__vector_convert<_To>(
            __extract_part<_Offset, _FromVT::_S_partial_width,
                           _FromVT::_S_partial_width - _Offset>(__v))};
      }
  }
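
// Usage sketch (illustrative comment only): converting 16 chars to ints on
// SSE2 yields four 4-int vectors:
//   using _ToV = __vector_type_t<int, 4>;
//   array<_ToV, 4> __r = __convert_all<_ToV>(__v16char);
// _NParts and _Offset select a subrange, e.g. __convert_all<_ToV, 2, 8>(__v)
// converts only elements [8, 16).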
// _GnuTraits
template <typename _Tp, typename _Mp, typename _Abi, size_t _Np>
  struct _GnuTraits
  {
    using _IsValid = true_type;
    using _SimdImpl = typename _Abi::_SimdImpl;
    using _MaskImpl = typename _Abi::_MaskImpl;

    // simd and simd_mask member types
    using _SimdMember = _SimdWrapper<_Tp, _Np>;
    using _MaskMember = _SimdWrapper<_Mp, _Np>;
    static constexpr size_t _S_simd_align = alignof(_SimdMember);
    static constexpr size_t _S_mask_align = alignof(_MaskMember);

    // size metadata
    static constexpr size_t _S_full_size = _SimdMember::_S_full_size;
    static constexpr bool _S_is_partial = _SimdMember::_S_is_partial;

    // _SimdBase / base class for simd, providing extra conversions
    struct _SimdBase2
    {
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __to_intrin(static_cast<const simd<_Tp, _Abi>*>(this)->_M_data); }

      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __vector_type_t<_Tp, _Np>() const
      { return static_cast<const simd<_Tp, _Abi>*>(this)->_M_data.__builtin(); }
    };

    struct _SimdBase1
    {
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __data(*static_cast<const simd<_Tp, _Abi>*>(this)); }
    };

    using _SimdBase = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _SimdBase1, _SimdBase2>;

    // _MaskBase
    struct _MaskBase2
    {
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return static_cast<const simd_mask<_Tp, _Abi>*>(this)->_M_data.__intrin(); }

      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __vector_type_t<_Tp, _Np>() const
      { return static_cast<const simd_mask<_Tp, _Abi>*>(this)->_M_data._M_data; }
    };

    struct _MaskBase1
    {
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __data(*static_cast<const simd_mask<_Tp, _Abi>*>(this)); }
    };

    using _MaskBase = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _MaskBase1, _MaskBase2>;

    // _MaskCastType: parameter type of one explicit simd_mask constructor
    class _MaskCastType
    {
      using _Up = __intrinsic_type_t<_Tp, _Np>;
      _Up _M_data;

    public:
      _GLIBCXX_SIMD_ALWAYS_INLINE
      _MaskCastType(_Up __x) : _M_data(__x) {}

      _GLIBCXX_SIMD_ALWAYS_INLINE
      operator _MaskMember() const { return _M_data; }
    };

    // _SimdCastType: parameter type of one explicit simd constructor
    class _SimdCastType1
    {
      using _Ap = __intrinsic_type_t<_Tp, _Np>;
      _SimdMember _M_data;

    public:
      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      _SimdCastType1(_Ap __a) : _M_data(__vector_bitcast<_Tp>(__a)) {}

      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      operator _SimdMember() const { return _M_data; }
    };

    class _SimdCastType2
    {
      using _Ap = __intrinsic_type_t<_Tp, _Np>;
      using _Bp = __vector_type_t<_Tp, _Np>;
      _SimdMember _M_data;

    public:
      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      _SimdCastType2(_Ap __a) : _M_data(__vector_bitcast<_Tp>(__a)) {}

      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      _SimdCastType2(_Bp __b) : _M_data(__b) {}

      _GLIBCXX_SIMD_ALWAYS_INLINE constexpr
      operator _SimdMember() const { return _M_data; }
    };

    using _SimdCastType = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _SimdCastType1, _SimdCastType2>;
  };
struct _CommonImplX86;
struct _CommonImplNeon;
struct _CommonImplBuiltin;
template <typename _Abi> struct _SimdImplBuiltin;
template <typename _Abi> struct _MaskImplBuiltin;
template <typename _Abi> struct _SimdImplX86;
template <typename _Abi> struct _MaskImplX86;
template <typename _Abi> struct _SimdImplNeon;
template <typename _Abi> struct _MaskImplNeon;
template <typename _Abi> struct _SimdImplPpc;
template <typename _Abi> struct _MaskImplPpc;
// simd_abi::_VecBuiltin
template <int _UsedBytes>
  struct simd_abi::_VecBuiltin
  {
    template <typename _Tp>
      static constexpr size_t _S_size = _UsedBytes / sizeof(_Tp);

    // validity traits
    struct _IsValidAbiTag : __bool_constant<(_UsedBytes > 1)> {};

    template <typename _Tp>
      struct _IsValidSizeFor
        : __bool_constant<(_UsedBytes / sizeof(_Tp) > 1
                           && _UsedBytes % sizeof(_Tp) == 0
                           && _UsedBytes <= __vectorized_sizeof<_Tp>()
                           && (!__have_avx512f || _UsedBytes <= 32))> {};

    template <typename _Tp>
      struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,
                                    _IsValidSizeFor<_Tp>> {};

    template <typename _Tp>
      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;

    // _SimdImpl/_MaskImpl
#if _GLIBCXX_SIMD_X86INTRIN
    using _CommonImpl = _CommonImplX86;
    using _SimdImpl = _SimdImplX86<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplX86<_VecBuiltin<_UsedBytes>>;
#elif _GLIBCXX_SIMD_HAVE_NEON
    using _CommonImpl = _CommonImplNeon;
    using _SimdImpl = _SimdImplNeon<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplNeon<_VecBuiltin<_UsedBytes>>;
#else
    using _CommonImpl = _CommonImplBuiltin;
#ifdef __ALTIVEC__
    using _SimdImpl = _SimdImplPpc<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplPpc<_VecBuiltin<_UsedBytes>>;
#else
    using _SimdImpl = _SimdImplBuiltin<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplBuiltin<_VecBuiltin<_UsedBytes>>;
#endif
#endif

    // __traits
    template <typename _Tp>
      using _MaskValueType = __int_for_sizeof_t<_Tp>;

    template <typename _Tp>
      using __traits
        = conditional_t<_S_is_valid_v<_Tp>,
                        _GnuTraits<_Tp, _MaskValueType<_Tp>,
                                   _VecBuiltin<_UsedBytes>, _S_size<_Tp>>,
                        _InvalidTraits>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = __traits<_Tp>::_S_full_size;

    template <typename _Tp>
      static constexpr bool _S_is_partial = __traits<_Tp>::_S_is_partial;

    // _S_implicit_mask
    template <typename _Tp>
      using _MaskMember = _SimdWrapper<_MaskValueType<_Tp>, _S_size<_Tp>>;

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_implicit_mask()
      {
        using _UV = typename _MaskMember<_Tp>::_BuiltinType;
        if constexpr (!_MaskMember<_Tp>::_S_is_partial)
          return ~_UV();
        else
          {
            constexpr auto __size = _S_size<_Tp>;
            _GLIBCXX_SIMD_USE_CONSTEXPR auto __r
              = __generate_vector<_UV>([](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
                                       { return __i < __size ? -1 : 0; });
            return __r;
          }
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr __intrinsic_type_t<_Tp, _S_size<_Tp>>
      _S_implicit_mask_intrin()
      { return __to_intrin(__vector_bitcast<_Tp>(_S_implicit_mask<_Tp>()._M_data)); }

    template <typename _TW, typename _TVT = _VectorTraits<_TW>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TW
      _S_masked(_TW __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_MaskMember<_Tp>::_S_is_partial)
          return __x;
        else
          return __and(__as_vector(__x),
                       __vector_bitcast<_Tp>(_S_implicit_mask<_Tp>()));
      }

    template <typename _TW, typename _TVT = _VectorTraits<_TW>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      __make_padding_nonzero(_TW __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_S_is_partial<_Tp>)
          return __x;
        else
          {
            _GLIBCXX_SIMD_USE_CONSTEXPR auto __implicit_mask
              = __vector_bitcast<_Tp>(_S_implicit_mask<_Tp>());
            if constexpr (is_integral_v<_Tp>)
              return __or(__x, ~__implicit_mask);
            else
              {
                _GLIBCXX_SIMD_USE_CONSTEXPR auto __one
                  = __andnot(__implicit_mask,
                             __vector_broadcast<_S_full_size<_Tp>>(_Tp(1)));
                // it is not enough to return `x | 1_in_padding` because the
                // padding in x might be inf or nan
                return __or(__and(__x, __implicit_mask), __one);
              }
          }
      }
  };
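
// Note (illustrative comment only): __make_padding_nonzero exists so that
// operations on partial registers cannot fault or raise FP exceptions on the
// unused lanes, e.g. _S_divides for a 3-element simd in a 4-element register
// replaces the 4th divisor lane with 1 before dividing.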
// simd_abi::_VecBltnBtmsk
template <int _UsedBytes>
  struct simd_abi::_VecBltnBtmsk
  {
    template <typename _Tp>
      static constexpr size_t _S_size = _UsedBytes / sizeof(_Tp);

    // validity traits
    struct _IsValidAbiTag : __bool_constant<(_UsedBytes > 1)> {};

    template <typename _Tp>
      struct _IsValidSizeFor
        : __bool_constant<(_UsedBytes / sizeof(_Tp) > 1
                           && _UsedBytes % sizeof(_Tp) == 0 && _UsedBytes <= 64
                           && (_UsedBytes > 32 || __have_avx512vl))> {};

    // Bitmasks require at least AVX512F; if sizeof(_Tp) < 4, AVX512BW is
    // required as well.
    template <typename _Tp>
      struct _IsValid
        : conjunction<
            _IsValidAbiTag, __bool_constant<__have_avx512f>,
            __bool_constant<__have_avx512bw || (sizeof(_Tp) >= 4)>,
            __bool_constant<(__vectorized_sizeof<_Tp>() > sizeof(_Tp))>,
            _IsValidSizeFor<_Tp>> {};

    template <typename _Tp>
      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;

    // _SimdImpl/_MaskImpl
#if _GLIBCXX_SIMD_X86INTRIN
    using _CommonImpl = _CommonImplX86;
    using _SimdImpl = _SimdImplX86<_VecBltnBtmsk<_UsedBytes>>;
    using _MaskImpl = _MaskImplX86<_VecBltnBtmsk<_UsedBytes>>;
#else
    template <int>
      struct _MissingImpl;

    using _CommonImpl = _MissingImpl<_UsedBytes>;
    using _SimdImpl = _MissingImpl<_UsedBytes>;
    using _MaskImpl = _MissingImpl<_UsedBytes>;
#endif

    // __traits
    template <typename _Tp>
      using _MaskMember = _SimdWrapper<bool, _S_size<_Tp>>;

    template <typename _Tp>
      using __traits = conditional_t<
        _S_is_valid_v<_Tp>,
        _GnuTraits<_Tp, bool, _VecBltnBtmsk<_UsedBytes>, _S_size<_Tp>>,
        _InvalidTraits>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = __traits<_Tp>::_S_full_size;

    template <typename _Tp>
      static constexpr bool _S_is_partial = __traits<_Tp>::_S_is_partial;

    // _S_implicit_mask
  private:
    template <typename _Tp>
      using _ImplicitMask = _SimdWrapper<bool, _S_size<_Tp>>;

    template <size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr __bool_storage_member_type_t<_Np>
      __implicit_mask_n()
      {
        using _Tp = __bool_storage_member_type_t<_Np>;
        return _Np < sizeof(_Tp) * __CHAR_BIT__ ? _Tp((1ULL << _Np) - 1) : ~_Tp();
      }

  public:
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _ImplicitMask<_Tp>
      _S_implicit_mask()
      { return __implicit_mask_n<_S_size<_Tp>>(); }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr __bool_storage_member_type_t<_S_size<_Tp>>
      _S_implicit_mask_intrin()
      { return __implicit_mask_n<_S_size<_Tp>>(); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_masked(_SimdWrapper<_Tp, _Np> __x)
      {
        if constexpr (is_same_v<_Tp, bool>)
          if constexpr (_Np < 8 || (_Np & (_Np - 1)) != 0)
            return _MaskImpl::_S_bit_and(
              __x, _SimdWrapper<_Tp, _Np>(
                     __bool_storage_member_type_t<_Np>((1ULL << _Np) - 1)));
          else
            return __x;
        else
          return _S_masked(__x._M_data);
      }

    template <typename _TV>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TV
      _S_masked(_TV __x)
      {
        using _Tp = typename _VectorTraits<_TV>::value_type;
        static_assert(
          !__is_bitmask_v<_TV>,
          "_VecBltnBtmsk::_S_masked cannot work on bitmasks, since it doesn't "
          "know the number of elements. Use _SimdWrapper<bool, N> instead.");
        if constexpr (_S_is_partial<_Tp>)
          {
            constexpr size_t _Np = _S_size<_Tp>;
            return __make_dependent_t<_TV, _CommonImpl>::_S_blend(
              _S_implicit_mask<_Tp>(), _SimdWrapper<_Tp, _Np>(),
              _SimdWrapper<_Tp, _Np>(__x));
          }
        else
          return __x;
      }

    template <typename _TV, typename _TVT = _VectorTraits<_TV>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      __make_padding_nonzero(_TV __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_S_is_partial<_Tp>)
          return __x;
        else
          {
            constexpr size_t _Np = _S_size<_Tp>;
            if constexpr (is_integral_v<typename _TVT::value_type>)
              return __x
                     | __generate_vector<_Tp, _S_full_size<_Tp>>(
                         [](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA -> _Tp {
                           if (__i < _Np)
                             return 0;
                           else
                             return 1;
                         });
            else
              return __make_dependent_t<_TV, _CommonImpl>::_S_blend(
                       _S_implicit_mask<_Tp>(),
                       _SimdWrapper<_Tp, _Np>(
                         __vector_broadcast<_S_full_size<_Tp>>(_Tp(1))),
                       _SimdWrapper<_Tp, _Np>(__x))
                ._M_data;
          }
      }
  };
// _CommonImplBuiltin
struct _CommonImplBuiltin
{
  // __converts_via_decomposition_v: lists all cases where __vector_convert
  // must fall back to converting individual scalars, in which case
  // _S_masked_load/_S_masked_store prefer the _S_bit_iteration fallback.
  template <typename _From, typename _To, size_t _ToSize>
    static inline constexpr bool __converts_via_decomposition_v
      = sizeof(_From) != sizeof(_To);

  // _S_load
  template <typename _Tp, size_t _Np, size_t _Bytes = _Np * sizeof(_Tp)>
    _GLIBCXX_SIMD_INTRINSIC static __vector_type_t<_Tp, _Np>
    _S_load(const void* __p)
    {
      static_assert(_Np > 1);
      static_assert(_Bytes % sizeof(_Tp) == 0);
      using _Rp = __vector_type_t<_Tp, _Np>;
      if constexpr (sizeof(_Rp) == _Bytes)
        {
          _Rp __r;
          __builtin_memcpy(&__r, __p, _Bytes);
          return __r;
        }
      else
        {
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR90424
          using _Up = conditional_t<
            is_integral_v<_Tp>,
            conditional_t<_Bytes % 4 == 0,
                          conditional_t<_Bytes % 8 == 0, long long, int>,
                          conditional_t<_Bytes % 2 == 0, short, signed char>>,
            conditional_t<(_Bytes < 8 || _Np % 2 == 1 || _Np == 2), _Tp,
                          double>>;
          using _V = __vector_type_t<_Up, _Np * sizeof(_Tp) / sizeof(_Up)>;
          if constexpr (sizeof(_V) != sizeof(_Rp))
            { // e.g. on i386 with 4 < _Bytes <= 8
              _Rp __r{};
              __builtin_memcpy(&__r, __p, _Bytes);
              return __r;
            }
          else
#else
          using _V = _Rp;
#endif
            {
              _V __r{};
              static_assert(_Bytes <= sizeof(_V));
              __builtin_memcpy(&__r, __p, _Bytes);
              return reinterpret_cast<_Rp>(__r);
            }
        }
    }

  // _S_store
  template <size_t _ReqBytes = 0, typename _TV>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_store(_TV __x, void* __addr)
    {
      constexpr size_t _Bytes = _ReqBytes == 0 ? sizeof(__x) : _ReqBytes;
      static_assert(sizeof(__x) >= _Bytes);

      if constexpr (__is_vector_type_v<_TV>)
        {
          using _Tp = typename _VectorTraits<_TV>::value_type;
          constexpr size_t _Np = _Bytes / sizeof(_Tp);
          static_assert(_Np * sizeof(_Tp) == _Bytes);

#ifdef _GLIBCXX_SIMD_WORKAROUND_PR90424
          using _Up = conditional_t<
            (is_integral_v<_Tp> || _Bytes < 4),
            conditional_t<(sizeof(__x) > sizeof(long long)), long long, _Tp>,
            float>;
          const auto __v = __vector_bitcast<_Up>(__x);
#else
          const __vector_type_t<_Tp, _Np> __v = __x;
#endif

          if constexpr ((_Bytes & (_Bytes - 1)) != 0)
            {
              constexpr size_t _MoreBytes = std::__bit_ceil(_Bytes);
              alignas(decltype(__v)) char __tmp[_MoreBytes];
              __builtin_memcpy(__tmp, &__v, _MoreBytes);
              __builtin_memcpy(__addr, __tmp, _Bytes);
            }
          else
            __builtin_memcpy(__addr, &__v, _Bytes);
        }
      else
        __builtin_memcpy(__addr, &__x, _Bytes);
    }

  template <typename _Tp, size_t _Np>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_store(_SimdWrapper<_Tp, _Np> __x, void* __addr)
    { _S_store<_Np * sizeof(_Tp)>(__x._M_data, __addr); }

  // _S_store_bool_array(_BitMask)
  template <size_t _Np, bool _Sanitized>
    _GLIBCXX_SIMD_INTRINSIC static constexpr void
    _S_store_bool_array(_BitMask<_Np, _Sanitized> __x, bool* __mem)
    {
      if constexpr (_Np == 1)
        __mem[0] = __x[0];
      else if (__builtin_is_constant_evaluated())
        {
          for (size_t __i = 0; __i < _Np; ++__i)
            __mem[__i] = __x[__i];
        }
      else if constexpr (_Np == 2)
        {
          short __bool2 = (__x._M_to_bits() * 0x81) & 0x0101;
          _S_store<_Np>(__bool2, __mem);
        }
      else if constexpr (_Np == 3)
        {
          int __bool3 = (__x._M_to_bits() * 0x4081) & 0x010101;
          _S_store<_Np>(__bool3, __mem);
        }
      else
        {
          __execute_n_times<__div_roundup(_Np, 4)>(
            [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              constexpr int __offset = __i * 4;
              constexpr int __remaining = _Np - __offset;
              if constexpr (__remaining > 4 && __remaining <= 7)
                {
                  const _ULLong __bool7
                    = (__x.template _M_extract<__offset>()._M_to_bits()
                         * 0x40810204081ULL)
                      & 0x0101010101010101ULL;
                  _S_store<__remaining>(__bool7, __mem + __offset);
                }
              else if constexpr (__remaining >= 4)
                {
                  int __bits = __x.template _M_extract<__offset>()._M_to_bits();
                  if constexpr (__remaining > 7)
                    __bits &= 0xf;
                  const int __bool4 = (__bits * 0x204081) & 0x01010101;
                  _S_store<4>(__bool4, __mem + __offset);
                }
            });
        }
    }
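
  // Worked example (illustrative comment only) of the multiply-and-mask trick
  // above for _Np == 2: with mask bits b1:b0, __x * 0x81 == (__x << 7) | __x,
  // which places b0 at bit 0 and b1 at bit 8; masking with 0x0101 leaves one
  // bool per Byte, which a single 2-Byte store writes out. The wider
  // constants (0x4081, 0x204081, 0x40810204081) smear 3, 4, and 7 bits the
  // same way, one Byte apart.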
  // _S_blend
  template <typename _Tp, size_t _Np>
    _GLIBCXX_SIMD_INTRINSIC static constexpr auto
    _S_blend(_SimdWrapper<__int_for_sizeof_t<_Tp>, _Np> __k,
             _SimdWrapper<_Tp, _Np> __at0, _SimdWrapper<_Tp, _Np> __at1)
    { return __k._M_data ? __at1._M_data : __at0._M_data; }
};
// _SimdImplBuiltin
template <typename _Abi>
  struct _SimdImplBuiltin
  {
    // member types
    template <typename _Tp>
      static constexpr size_t _S_max_store_size = 16;

    using abi_type = _Abi;

    template <typename _Tp>
      using _TypeTag = _Tp*;

    template <typename _Tp>
      using _SimdMember = typename _Abi::template __traits<_Tp>::_SimdMember;

    template <typename _Tp>
      using _MaskMember = typename _Abi::template _MaskMember<_Tp>;

    template <typename _Tp>
      static constexpr size_t _S_size = _Abi::template _S_size<_Tp>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = _Abi::template _S_full_size<_Tp>;

    using _CommonImpl = typename _Abi::_CommonImpl;
    using _SuperImpl = typename _Abi::_SimdImpl;
    using _MaskImpl = typename _Abi::_MaskImpl;

    // _M_make_simd(_SimdWrapper/__intrinsic_type_t)
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr simd<_Tp, _Abi>
      _M_make_simd(_SimdWrapper<_Tp, _Np> __x)
      { return {__private_init, __x}; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr simd<_Tp, _Abi>
      _M_make_simd(__intrinsic_type_t<_Tp, _Np> __x)
      { return {__private_init, __vector_bitcast<_Tp>(__x)}; }

    // _S_broadcast
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
      _S_broadcast(_Tp __x) noexcept
      { return __vector_broadcast<_S_full_size<_Tp>>(__x); }

    // _S_generator
    template <typename _Fp, typename _Tp>
      inline static constexpr _SimdMember<_Tp>
      _S_generator(_Fp&& __gen, _TypeTag<_Tp>)
      {
        return __generate_vector<_Tp, _S_full_size<_Tp>>(
          [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            if constexpr (__i < _S_size<_Tp>)
              return __gen(__i);
            else
              return 0;
          });
      }

    // _S_load
    template <typename _Tp, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
      _S_load(const _Up* __mem, _TypeTag<_Tp>) noexcept
      {
        constexpr size_t _Np = _S_size<_Tp>;
        constexpr size_t __max_load_size
          = (sizeof(_Up) >= 4 && __have_avx512f) || __have_avx512bw   ? 64
            : (is_floating_point_v<_Up> && __have_avx) || __have_avx2 ? 32
                                                                      : 16;
        constexpr size_t __bytes_to_load = sizeof(_Up) * _Np;
        if (__builtin_is_constant_evaluated())
          return __generate_vector<_Tp, _S_full_size<_Tp>>(
            [&](auto __i) constexpr {
              return static_cast<_Tp>(__i < _Np ? __mem[__i] : 0);
            });
        else if constexpr (sizeof(_Up) > 8)
          return __generate_vector<_Tp, _SimdMember<_Tp>::_S_full_size>(
            [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              return static_cast<_Tp>(__i < _Np ? __mem[__i] : 0);
            });
        else if constexpr (is_same_v<_Up, _Tp>)
          return _CommonImpl::template _S_load<_Tp, _S_full_size<_Tp>,
                                               _Np * sizeof(_Tp)>(__mem);
        else if constexpr (__bytes_to_load <= __max_load_size)
          return __convert<_SimdMember<_Tp>>(
            _CommonImpl::template _S_load<_Up, _Np>(__mem));
        else if constexpr (__bytes_to_load % __max_load_size == 0)
          {
            constexpr size_t __n_loads = __bytes_to_load / __max_load_size;
            constexpr size_t __elements_per_load = _Np / __n_loads;
            return __call_with_n_evaluations<__n_loads>(
              [](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                return __convert<_SimdMember<_Tp>>(__uncvted...);
              }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                return _CommonImpl::template _S_load<_Up, __elements_per_load>(
                         __mem + __i * __elements_per_load);
              });
          }
        else if constexpr (__bytes_to_load % (__max_load_size / 2) == 0
                           && __max_load_size > 16)
          {
            constexpr size_t __n_loads
              = __bytes_to_load / (__max_load_size / 2);
            constexpr size_t __elements_per_load = _Np / __n_loads;
            return __call_with_n_evaluations<__n_loads>(
              [](auto... __uncvted) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                return __convert<_SimdMember<_Tp>>(__uncvted...);
              }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                return _CommonImpl::template _S_load<_Up, __elements_per_load>(
                         __mem + __i * __elements_per_load);
              });
          }
        else // e.g. int[] -> <char, 9>
          return __call_with_subscripts(
            __mem, make_index_sequence<_Np>(),
            [](auto... __args) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              return __vector_type_t<_Tp, _S_full_size<_Tp>>{
                static_cast<_Tp>(__args)...};
            });
      }

    // _S_masked_load
    template <typename _Tp, size_t _Np, typename _Up>
      static constexpr inline _SimdWrapper<_Tp, _Np>
      _S_masked_load(_SimdWrapper<_Tp, _Np> __merge, _MaskMember<_Tp> __k,
                     const _Up* __mem) noexcept
      {
        _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k),
                                  [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                                    __merge._M_set(__i, static_cast<_Tp>(__mem[__i]));
                                  });
        return __merge;
      }

    // _S_store
    template <typename _Tp, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_store(_SimdMember<_Tp> __v, _Up* __mem, _TypeTag<_Tp>) noexcept
      {
        constexpr size_t _Np = _S_size<_Tp>;
        constexpr size_t __max_store_size
          = _SuperImpl::template _S_max_store_size<_Up>;
        if (__builtin_is_constant_evaluated())
          {
            for (size_t __i = 0; __i < _Np; ++__i)
              __mem[__i] = __v[__i];
          }
        else if constexpr (sizeof(_Up) > 8)
          __execute_n_times<_Np>([&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __mem[__i] = __v[__i];
          });
        else if constexpr (is_same_v<_Up, _Tp>)
          _CommonImpl::_S_store(__v, __mem);
        else if constexpr (sizeof(_Up) * _Np <= __max_store_size)
          _CommonImpl::_S_store(_SimdWrapper<_Up, _Np>(__convert<_Up>(__v)),
                                __mem);
        else
          {
            constexpr size_t __vsize = __max_store_size / sizeof(_Up);
            // round up to convert the last partial vector as well:
            constexpr size_t __stores = __div_roundup(_Np, __vsize);
            constexpr size_t __full_stores = _Np / __vsize;
            using _V = __vector_type_t<_Up, __vsize>;
            const array<_V, __stores> __converted
              = __convert_all<_V, __stores>(__v);
            __execute_n_times<__full_stores>(
              [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                _CommonImpl::_S_store(__converted[__i], __mem + __i * __vsize);
              });
            if constexpr (__full_stores < __stores)
              _CommonImpl::template _S_store<(_Np - __full_stores * __vsize)
                                               * sizeof(_Up)>(
                __converted[__full_stores], __mem + __full_stores * __vsize);
          }
      }

    // _S_masked_store_nocvt
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem, _MaskMember<_Tp> __k)
      {
        _BitOps::_S_bit_iteration(
          _MaskImpl::_S_to_bits(__k),
          [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __mem[__i] = __v[__i];
          });
      }

    // _S_masked_store
    template <typename _TW, typename _TVT = _VectorTraits<_TW>,
              typename _Tp = typename _TVT::value_type, typename _Up>
      static constexpr inline void
      _S_masked_store(const _TW __v, _Up* __mem, const _MaskMember<_Tp> __k)
        noexcept
      {
        constexpr size_t _TV_size = _S_size<_Tp>;
        [[maybe_unused]] const auto __vi = __to_intrin(__v);
        constexpr size_t __max_store_size
          = _SuperImpl::template _S_max_store_size<_Up>;
        if constexpr (
          is_same_v<_Tp, _Up>
          || (is_integral_v<_Tp> && is_integral_v<_Up>
              && sizeof(_Tp) == sizeof(_Up)))
          {
            // bitwise or no conversion, reinterpret:
            const _MaskMember<_Up> __kk = [&]() _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
              if constexpr (__is_bitmask_v<decltype(__k)>)
                return _MaskMember<_Up>(__k._M_data);
              else
                return __wrapper_bitcast<__int_for_sizeof_t<_Up>>(__k);
            }();
            _SuperImpl::_S_masked_store_nocvt(__wrapper_bitcast<_Up>(__v),
                                              __mem, __kk);
          }
        else if constexpr (__vectorized_sizeof<_Up>() > sizeof(_Up)
                           && !_CommonImpl::
                                template __converts_via_decomposition_v<
                                  _Tp, _Up, __max_store_size>)
          { // conversion via decomposition is better handled via the
            // bit_iteration fallback below
            constexpr size_t _UW_size
              = std::min(_TV_size, __max_store_size / sizeof(_Up));
            static_assert(_UW_size <= _TV_size);
            using _UW = _SimdWrapper<_Up, _UW_size>;
            using _UV = __vector_type_t<_Up, _UW_size>;
            using _UAbi = simd_abi::deduce_t<_Up, _UW_size>;
            if constexpr (_UW_size == _TV_size) // one convert+store
              {
                const _UW __converted = __convert<_UW>(__v);
                _SuperImpl::_S_masked_store_nocvt(
                  __converted, __mem,
                  _UAbi::_MaskImpl::template _S_convert<
                    __int_for_sizeof_t<_Up>>(__k));
              }
            else
              {
                static_assert(_UW_size * sizeof(_Up) == __max_store_size);
                constexpr size_t _NFullStores = _TV_size / _UW_size;
                constexpr size_t _NAllStores
                  = __div_roundup(_TV_size, _UW_size);
                constexpr size_t _NParts = _S_full_size<_Tp> / _UW_size;
                const array<_UV, _NAllStores> __converted
                  = __convert_all<_UV, _NAllStores>(__v);
                __execute_n_times<_NFullStores>([&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                  _SuperImpl::_S_masked_store_nocvt(
                    _UW(__converted[__i]), __mem + __i * _UW_size,
                    _UAbi::_MaskImpl::template _S_convert<
                      __int_for_sizeof_t<_Up>>(
                      __extract_part<__i, _NParts>(__k.__as_full_vector())));
                });
                if constexpr (_NAllStores
                              > _NFullStores) // one partial at the end
                  _SuperImpl::_S_masked_store_nocvt(
                    _UW(__converted[_NFullStores]),
                    __mem + _NFullStores * _UW_size,
                    _UAbi::_MaskImpl::template _S_convert<
                      __int_for_sizeof_t<_Up>>(
                      __extract_part<_NFullStores, _NParts>(
                        __k.__as_full_vector())));
              }
          }
        else
          _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k),
                                    [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                                      __mem[__i] = static_cast<_Up>(__v[__i]);
                                    });
      }

    // _S_complement
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_complement(_SimdWrapper<_Tp, _Np> __x) noexcept
      { return ~__x._M_data; }

    // _S_unary_minus
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_unary_minus(_SimdWrapper<_Tp, _Np> __x) noexcept
      { return -__x._M_data; }

    // arithmetic operators
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_plus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data + __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_minus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data - __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_multiplies(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data * __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_divides(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      {
        if constexpr (!_Abi::template _S_is_partial<_Tp>)
          return __x._M_data / __y._M_data;
        else
          return __x._M_data / _Abi::__make_padding_nonzero(__y._M_data);
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_modulus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      {
        if constexpr (!_Abi::template _S_is_partial<_Tp>)
          return __x._M_data % __y._M_data;
        else
          return __as_vector(__x)
                 % _Abi::__make_padding_nonzero(__as_vector(__y));
      }
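
    // Note (illustrative comment only): both _S_divides and _S_modulus
    // rewrite the padding lanes of the divisor to 1 via
    // __make_padding_nonzero, because a partially used register would
    // otherwise divide by an indeterminate lane value, which can trap for
    // integer division by zero.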
    // bitwise operators
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_and(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __and(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_or(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __or(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_xor(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __xor(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_bit_shift_left(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data << __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_bit_shift_right(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data >> __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_shift_left(_SimdWrapper<_Tp, _Np> __x, int __y)
      { return __x._M_data << __y; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_shift_right(_SimdWrapper<_Tp, _Np> __x, int __y)
      { return __x._M_data >> __y; }

    // compares
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_equal_to(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data == __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_not_equal_to(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data != __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_less(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data < __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_less_equal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data <= __y._M_data; }

    // negation
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_negate(_SimdWrapper<_Tp, _Np> __x) noexcept
      { return !__x._M_data; }

    // _S_min, _S_max, _S_minmax
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC static constexpr
      _SimdWrapper<_Tp, _Np>
      _S_min(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
      { return __a._M_data < __b._M_data ? __a._M_data : __b._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC static constexpr
      _SimdWrapper<_Tp, _Np>
      _S_max(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
      { return __a._M_data > __b._M_data ? __a._M_data : __b._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC static constexpr
      pair<_SimdWrapper<_Tp, _Np>, _SimdWrapper<_Tp, _Np>>
      _S_minmax(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
      {
        return {__a._M_data < __b._M_data ? __a._M_data : __b._M_data,
                __a._M_data < __b._M_data ? __b._M_data : __a._M_data};
      }

    // _S_reduce_partial
    template <size_t _Np, size_t... _Is, size_t... _Zeros, typename _Tp,
              typename _BinaryOperation>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
      _S_reduce_partial(index_sequence<_Is...>, index_sequence<_Zeros...>,
                        simd<_Tp, _Abi> __x, _BinaryOperation&& __binary_op)
      {
        using _V = __vector_type_t<_Tp, _Np / 2>;
        static_assert(sizeof(_V) <= sizeof(__x));
        // _S_full_size is the size of the smallest native SIMD register that
        // can store _Np/2 elements:
        using _FullSimd = __deduced_simd<_Tp, _VectorTraits<_V>::_S_full_size>;
        using _HalfSimd = __deduced_simd<_Tp, _Np / 2>;
        const auto __xx = __as_vector(__x);
        return _HalfSimd::abi_type::_SimdImpl::_S_reduce(
          static_cast<_HalfSimd>(__as_vector(__binary_op(
            static_cast<_FullSimd>(__intrin_bitcast<_V>(__xx)),
            static_cast<_FullSimd>(__intrin_bitcast<_V>(
              __vector_permute<(_Np / 2 + _Is)..., (int(_Zeros * 0) - 1)...>(
                __xx)))))),
          __binary_op);
      }

    // _S_reduce
    template <typename _Tp, typename _BinaryOperation>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _Tp
      _S_reduce(simd<_Tp, _Abi> __x, _BinaryOperation&& __binary_op)
      {
        constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
        if constexpr (_Np == 1)
          return __x[0];
        else if constexpr (_Np == 2)
          return __binary_op(simd<_Tp, simd_abi::scalar>(__x[0]),
                             simd<_Tp, simd_abi::scalar>(__x[1]))[0];
        else if (__builtin_is_constant_evaluated())
          {
            simd<_Tp, simd_abi::scalar> __acc = __x[0];
            for (size_t __i = 1; __i < _Np; ++__i)
              __acc = __binary_op(__acc, simd<_Tp, simd_abi::scalar>(__x[__i]));
            return __acc[0];
          }
        else if constexpr (_Abi::template _S_is_partial<_Tp>)
          {
            [[maybe_unused]] constexpr auto __full_size
              = _Abi::template _S_full_size<_Tp>;
            if constexpr (_Np == 3)
              return __binary_op(
                __binary_op(simd<_Tp, simd_abi::scalar>(__x[0]),
                            simd<_Tp, simd_abi::scalar>(__x[1])),
                simd<_Tp, simd_abi::scalar>(__x[2]))[0];
            else if constexpr (is_same_v<__remove_cvref_t<_BinaryOperation>,
                                         plus<>>)
              {
                using _Ap = simd_abi::deduce_t<_Tp, __full_size>;
                return _Ap::_SimdImpl::_S_reduce(
                  simd<_Tp, _Ap>(__private_init,
                                 _Abi::_S_masked(__as_vector(__x))),
                  __binary_op);
              }
            else if constexpr (is_same_v<__remove_cvref_t<_BinaryOperation>,
                                         multiplies<>>)
              {
                using _Ap = simd_abi::deduce_t<_Tp, __full_size>;
                using _TW = _SimdWrapper<_Tp, __full_size>;
                _GLIBCXX_SIMD_USE_CONSTEXPR auto __implicit_mask_full
                  = _Abi::template _S_implicit_mask<_Tp>().__as_full_vector();
                _GLIBCXX_SIMD_USE_CONSTEXPR _TW __one
                  = __vector_broadcast<__full_size>(_Tp(1));
                const _TW __x_full = __data(__x).__as_full_vector();
                const _TW __x_padded_with_ones
                  = _Ap::_CommonImpl::_S_blend(__implicit_mask_full, __one,
                                               __x_full);
                return _Ap::_SimdImpl::_S_reduce(
                  simd<_Tp, _Ap>(__private_init, __x_padded_with_ones),
                  __binary_op);
              }
            else if constexpr (_Np & 1)
              {
                using _Ap = simd_abi::deduce_t<_Tp, _Np - 1>;
                return __binary_op(
                  simd<_Tp, simd_abi::scalar>(_Ap::_SimdImpl::_S_reduce(
                    simd<_Tp, _Ap>(__private_init,
                                   __intrin_bitcast<__vector_type_t<_Tp, _Np - 1>>(
                                     __as_vector(__x))),
                    __binary_op)),
                  simd<_Tp, simd_abi::scalar>(__x[_Np - 1]))[0];
              }
            else
              return _S_reduce_partial<_Np>(
                make_index_sequence<_Np / 2>(),
                make_index_sequence<__full_size - _Np / 2>(), __x, __binary_op);
          }
        else if constexpr (sizeof(__x) == 16)
          {
            if constexpr (_Np == 16)
              {
                const auto __y = __data(__x);
                __x = __binary_op(
                  _M_make_simd<_Tp, _Np>(
                    __vector_permute<0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
                                     7, 7>(__y)),
                  _M_make_simd<_Tp, _Np>(
                    __vector_permute<8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
                                     14, 14, 15, 15>(__y)));
              }
            if constexpr (_Np >= 8)
              {
                const auto __y = __vector_bitcast<short>(__data(__x));
                __x = __binary_op(
                  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
                    __vector_permute<0, 0, 1, 1, 2, 2, 3, 3>(__y))),
                  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
                    __vector_permute<4, 4, 5, 5, 6, 6, 7, 7>(__y))));
              }
            if constexpr (_Np >= 4)
              {
                using _Up = conditional_t<is_floating_point_v<_Tp>, float, int>;
                const auto __y = __vector_bitcast<_Up>(__data(__x));
                __x = __binary_op(__x,
                                  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
                                    __vector_permute<3, 2, 1, 0>(__y))));
              }
            using _Up = conditional_t<is_floating_point_v<_Tp>, double, _LLong>;
            const auto __y = __vector_bitcast<_Up>(__data(__x));
            __x = __binary_op(__x, _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
                                     __vector_permute<1, 1>(__y))));
            return __x[0];
          }
        else
          {
            static_assert(sizeof(__x) > __min_vector_size<_Tp>);
            static_assert((_Np & (_Np - 1)) == 0); // _Np must be a power of 2
            using _Ap = simd_abi::deduce_t<_Tp, _Np / 2>;
            using _V = simd<_Tp, _Ap>;
            return _Ap::_SimdImpl::_S_reduce(
              __binary_op(_V(__private_init, __extract<0, 2>(__as_vector(__x))),
                          _V(__private_init,
                             __extract<1, 2>(__as_vector(__x)))),
              static_cast<_BinaryOperation&&>(__binary_op));
          }
      }
#define _GLIBCXX_SIMD_MATH_FALLBACK(__name)                                    \
    template <typename _Tp, typename... _More>                                 \
      static _Tp                                                               \
      _S_##__name(const _Tp& __x, const _More&... __more)                      \
      {                                                                        \
        return __generate_vector<_Tp>(                                         \
                 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {            \
                   return __name(__x[__i], __more[__i]...);                    \
                 });                                                           \
      }

#define _GLIBCXX_SIMD_MATH_FALLBACK_MASKRET(__name)                            \
    template <typename _Tp, typename... _More>                                 \
      static typename _Tp::mask_type                                           \
      _S_##__name(const _Tp& __x, const _More&... __more)                      \
      {                                                                        \
        return __generate_vector<_Tp>(                                         \
                 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {            \
                   return __name(__x[__i], __more[__i]...);                    \
                 });                                                           \
      }

#define _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(_RetTp, __name)                   \
    template <typename _Tp, typename... _More>                                 \
      static auto                                                              \
      _S_##__name(const _Tp& __x, const _More&... __more)                      \
      {                                                                        \
        return __fixed_size_storage_t<_RetTp,                                  \
                                      _VectorTraits<_Tp>::_S_partial_width>::  \
          _S_generate([&](auto __meta) constexpr                               \
                        _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {                   \
            return __meta._S_generator(                                        \
                     [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {        \
                       return __name(__x[__meta._S_offset + __i],              \
                                     __more[__meta._S_offset + __i]...);       \
                     },                                                        \
                     static_cast<_RetTp*>(nullptr));                           \
          });                                                                  \
      }
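    // Each fallback macro expands to a member function that evaluates the
    // scalar <cmath> function once per element. For instance,
    // _GLIBCXX_SIMD_MATH_FALLBACK(acos) expands to (roughly):
    //   template <typename _Tp, typename... _More>
    //     static _Tp
    //     _S_acos(const _Tp& __x, const _More&... __more)
    //     { return __generate_vector<_Tp>(
    //                [&](auto __i) { return acos(__x[__i], __more[__i]...); }); }
    // The FIXEDRET variant is for functions like ilogb/lrint whose return
    // element type differs from the argument element type.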
    _GLIBCXX_SIMD_MATH_FALLBACK(acos)
    _GLIBCXX_SIMD_MATH_FALLBACK(asin)
    _GLIBCXX_SIMD_MATH_FALLBACK(atan)
    _GLIBCXX_SIMD_MATH_FALLBACK(atan2)
    _GLIBCXX_SIMD_MATH_FALLBACK(cos)
    _GLIBCXX_SIMD_MATH_FALLBACK(sin)
    _GLIBCXX_SIMD_MATH_FALLBACK(tan)
    _GLIBCXX_SIMD_MATH_FALLBACK(acosh)
    _GLIBCXX_SIMD_MATH_FALLBACK(asinh)
    _GLIBCXX_SIMD_MATH_FALLBACK(atanh)
    _GLIBCXX_SIMD_MATH_FALLBACK(cosh)
    _GLIBCXX_SIMD_MATH_FALLBACK(sinh)
    _GLIBCXX_SIMD_MATH_FALLBACK(tanh)
    _GLIBCXX_SIMD_MATH_FALLBACK(exp)
    _GLIBCXX_SIMD_MATH_FALLBACK(exp2)
    _GLIBCXX_SIMD_MATH_FALLBACK(expm1)
    _GLIBCXX_SIMD_MATH_FALLBACK(ldexp)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(int, ilogb)
    _GLIBCXX_SIMD_MATH_FALLBACK(log)
    _GLIBCXX_SIMD_MATH_FALLBACK(log10)
    _GLIBCXX_SIMD_MATH_FALLBACK(log1p)
    _GLIBCXX_SIMD_MATH_FALLBACK(log2)
    _GLIBCXX_SIMD_MATH_FALLBACK(logb)
    _GLIBCXX_SIMD_MATH_FALLBACK(scalbn)
    _GLIBCXX_SIMD_MATH_FALLBACK(scalbln)
    _GLIBCXX_SIMD_MATH_FALLBACK(cbrt)
    _GLIBCXX_SIMD_MATH_FALLBACK(fabs)
    _GLIBCXX_SIMD_MATH_FALLBACK(pow)
    _GLIBCXX_SIMD_MATH_FALLBACK(sqrt)
    _GLIBCXX_SIMD_MATH_FALLBACK(erf)
    _GLIBCXX_SIMD_MATH_FALLBACK(erfc)
    _GLIBCXX_SIMD_MATH_FALLBACK(lgamma)
    _GLIBCXX_SIMD_MATH_FALLBACK(tgamma)

    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long, lrint)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long long, llrint)

    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long, lround)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long long, llround)

    _GLIBCXX_SIMD_MATH_FALLBACK(fmod)
    _GLIBCXX_SIMD_MATH_FALLBACK(remainder)
    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      static _Tp
      _S_remquo(const _Tp __x, const _Tp __y,
                __fixed_size_storage_t<int, _TVT::_S_partial_width>* __z)
      {
        return __generate_vector<_Tp>(
                 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   int __tmp;
                   auto __r = remquo(__x[__i], __y[__i], &__tmp);
                   __z->_M_set(__i, __tmp);
                   return __r;
                 });
      }
    _GLIBCXX_SIMD_MATH_FALLBACK(nextafter)
    _GLIBCXX_SIMD_MATH_FALLBACK(fdim)
    _GLIBCXX_SIMD_MATH_FALLBACK(fmax)
    _GLIBCXX_SIMD_MATH_FALLBACK(fmin)
    _GLIBCXX_SIMD_MATH_FALLBACK(fma)
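    // The _S_isgreater family below never performs a floating-point compare
    // on possibly-NaN inputs (presumably so that no FE_INVALID can be raised,
    // as ISO C requires for these functions). Instead it compares bit
    // patterns as signed integers: negative floats order inverted as
    // integers, so negative values are first mapped to their two's-complement
    // mirror image; NaN lanes are masked off via _S_isunordered.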
    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isgreater(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      noexcept
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __xn = __vector_bitcast<_Ip>(__x);
        const auto __yn = __vector_bitcast<_Ip>(__y);
        const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
        const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
        return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
                        __xp > __yp);
      }

    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isgreaterequal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      noexcept
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __xn = __vector_bitcast<_Ip>(__x);
        const auto __yn = __vector_bitcast<_Ip>(__y);
        const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
        const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
        return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
                        __xp >= __yp);
      }

    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isless(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      noexcept
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __xn = __vector_bitcast<_Ip>(__x);
        const auto __yn = __vector_bitcast<_Ip>(__y);
        const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
        const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
        return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
                        __xp < __yp);
      }

    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_islessequal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      noexcept
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __xn = __vector_bitcast<_Ip>(__x);
        const auto __yn = __vector_bitcast<_Ip>(__y);
        const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
        const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
        return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
                        __xp <= __yp);
      }

    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_islessgreater(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      noexcept
      {
        return __andnot(_SuperImpl::_S_isunordered(__x, __y),
                        _SuperImpl::_S_not_equal_to(__x, __y));
      }
#undef _GLIBCXX_SIMD_MATH_FALLBACK
#undef _GLIBCXX_SIMD_MATH_FALLBACK_MASKRET
#undef _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_abs(_SimdWrapper<_Tp, _Np> __x) noexcept
      {
        if constexpr (is_floating_point_v<_Tp>)
          // `__x < 0 ? -__x : __x` cannot compile down to a simple sign-bit
          // mask because it would have to treat -0 specially; masking the
          // sign bit off is correct for all values:
          return __and(_S_absmask<__vector_type_t<_Tp, _Np>>, __x._M_data);
        else
          return __x._M_data < 0 ? -__x._M_data : __x._M_data;
      }
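    // _S_plus_minus computes (__x + __y) - __y in a way the compiler cannot
    // fold to plain __x under -fassociative-math. The empty asm statements
    // below act as optimization barriers: they force __x + __y to be
    // materialized in a register before __y is subtracted again, which the
    // rounding implementations further down rely on.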
    template <typename _TV, typename _UV>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TV
      _S_plus_minus(_TV __x, _UV __y) noexcept
      {
#if defined __i386__ && !defined __SSE_MATH__
        if constexpr (sizeof(__x) == 8)
          { // operations on __x would use the FPU
            static_assert(is_same_v<_TV, __vector_type_t<float, 2>>);
            const auto __x4 = __vector_bitcast<float, 4>(__x);
            if constexpr (is_same_v<_TV, _UV>)
              return __vector_bitcast<float, 2>(
                       _S_plus_minus(__x4, __vector_bitcast<float, 4>(__y)));
            else
              return __vector_bitcast<float, 2>(_S_plus_minus(__x4, __y));
          }
#endif
#if !defined __clang__ && __GCC_IEC_559 == 0
        if (__builtin_is_constant_evaluated()
              || (__builtin_constant_p(__x) && __builtin_constant_p(__y)))
          return (__x + __y) - __y;
        else
          return [&] {
            __x += __y;
            if constexpr (__have_sse)
              {
                if constexpr (sizeof(__x) >= 16)
                  asm("" : "+x"(__x));
                else if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
                  asm("" : "+x"(__x[0]), "+x"(__x[1]));
                else
                  __assert_unreachable<_TV>();
              }
            else if constexpr (__have_neon)
              asm("" : "+w"(__x));
            else if constexpr (__have_power_vmx)
              {
                if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
                  asm("" : "+fgr"(__x[0]), "+fgr"(__x[1]));
                else
                  asm("" : "+v"(__x));
              }
            else
              asm("" : "+g"(__x));
            return __x - __y;
          }();
#else
        return (__x + __y) - __y;
#endif
      }
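    // _S_nearbyint rounds by adding and subtracting 2^(mantissa digits - 1),
    // i.e. 2^52 for double: after the addition no fractional bits remain, so
    // subtracting the shifter again yields the value rounded to integral in
    // the current rounding mode. Worked example (round-to-nearest, double):
    //   __x = 3.7, __shifter = 2^52 = 4503599627370496
    //   __x + __shifter = 4503599627370499.7 -> rounds to ...500.0
    //   ...500.0 - __shifter = 4.0 == nearbyint(3.7)
    // The shifter copies the sign of __x, and inputs with |x| >= 2^52 (which
    // are already integral) are returned unchanged.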
    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      _GLIBCXX_SIMD_INTRINSIC static _Tp
      _S_nearbyint(_Tp __x_) noexcept
      {
        using value_type = typename _TVT::value_type;
        using _V = typename _TVT::type;
        const _V __x = __x_;
        const _V __absx = __and(__x, _S_absmask<_V>);
        static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<value_type>);
        _GLIBCXX_SIMD_USE_CONSTEXPR _V __shifter_abs
          = _V() + (1ull << (__digits_v<value_type> - 1));
        const _V __shifter = __or(__and(_S_signmask<_V>, __x), __shifter_abs);
        const _V __shifted = _S_plus_minus(__x, __shifter);
        return __absx < __shifter_abs ? __shifted : __x;
      }

    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      _GLIBCXX_SIMD_INTRINSIC static _Tp
      _S_rint(_Tp __x) noexcept
      { return _SuperImpl::_S_nearbyint(__x); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_trunc(_SimdWrapper<_Tp, _Np> __x)
      {
        using _V = __vector_type_t<_Tp, _Np>;
        const _V __absx = __and(__x._M_data, _S_absmask<_V>);
        static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<_Tp>);
        constexpr _Tp __shifter = 1ull << (__digits_v<_Tp> - 1);
        _V __truncated = _S_plus_minus(__absx, __shifter);
        __truncated -= __truncated > __absx ? _V() + 1 : _V();
        return __absx < __shifter
                 ? __or(__xor(__absx, __x._M_data), __truncated)
                 : __x._M_data;
      }
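    // _S_round, _S_floor, and _S_ceil below post-process _S_trunc: round adds
    // 1 to the truncated magnitude when the discarded fraction is >= .5,
    // floor subtracts 1 for negative non-integral inputs, and ceil adds 1 for
    // positive non-integral inputs. The __xor/__or with the input restores
    // the original sign bit (so e.g. round(-0.4) yields -0.0).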
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_round(_SimdWrapper<_Tp, _Np> __x)
      {
        const auto __abs_x = _SuperImpl::_S_abs(__x);
        const auto __t_abs = _SuperImpl::_S_trunc(__abs_x)._M_data;
        const auto __r_abs // round(abs(x))
          = __t_abs + (__abs_x._M_data - __t_abs >= _Tp(.5) ? _Tp(1) : 0);
        return __or(__xor(__abs_x._M_data, __x._M_data), __r_abs);
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_floor(_SimdWrapper<_Tp, _Np> __x)
      {
        const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
        const auto __negative_input
          = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
        const auto __mask
          = __andnot(__vector_bitcast<_Tp>(__y == __x._M_data),
                     __negative_input);
        return __or(__andnot(__mask, __y),
                    __and(__mask, __y - __vector_broadcast<_Np, _Tp>(1)));
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_ceil(_SimdWrapper<_Tp, _Np> __x)
      {
        const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
        const auto __negative_input
          = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
        const auto __inv_mask
          = __or(__vector_bitcast<_Tp>(__y == __x._M_data), __negative_input);
        return __or(__and(__inv_mask, __y),
                    __andnot(__inv_mask, __y + __vector_broadcast<_Np, _Tp>(1)));
      }
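    // The classification functions below (isnan, isfinite, isinf, isnormal,
    // signbit, fpclassify) inspect IEC 559 bit patterns through integer
    // vectors: for example, |x| > bit pattern of +inf, compared as signed
    // integers, identifies NaNs without a floating-point compare, and the
    // sign bit of the float is simply the integer sign.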
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isnan([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
        return {}; // false
#elif !defined __SUPPORT_SNAN__
        return ~(__x._M_data == __x._M_data);
#elif defined __STDC_IEC_559__
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
        const auto __infn
          = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__infinity_v<_Tp>));
        return __infn < __absn;
#else
#error "Not implemented: how to support SNaN but non-IEC559 floating-point?"
#endif
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isfinite([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
        using _UV = typename _MaskMember<_Tp>::_BuiltinType;
        _GLIBCXX_SIMD_USE_CONSTEXPR _UV __alltrue = ~_UV();
        return __alltrue;
#else
        // if all exponent bits are set, __x is either inf or NaN
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
        const auto __maxn
          = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
        return __absn <= __maxn;
#endif
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isunordered(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __or(_S_isnan(__x), _S_isnan(__y)); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_signbit(_SimdWrapper<_Tp, _Np> __x)
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        return __vector_bitcast<_Ip>(__x) < 0;
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isinf([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
        return {}; // false
#else
        return _SuperImpl::template _S_equal_to<_Tp, _Np>(
                 _SuperImpl::_S_abs(__x),
                 __vector_broadcast<_Np>(__infinity_v<_Tp>));
#endif
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isnormal(_SimdWrapper<_Tp, _Np> __x)
      {
        using _Ip = __int_for_sizeof_t<_Tp>;
        const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
        const auto __minn
          = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__norm_min_v<_Tp>));
#if __FINITE_MATH_ONLY__
        return __absn >= __minn; // subnormals -> false
#else
        const auto __maxn
          = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
        return __minn <= __absn && __absn <= __maxn;
#endif
      }
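    // _S_fpclassify computes, per element, one of FP_ZERO, FP_SUBNORMAL,
    // FP_NORMAL, FP_INFINITE, FP_NAN by comparing |x|'s bit pattern against
    // the patterns of the smallest normal value and of infinity, then narrows
    // the result into the __fixed_size_storage_t<int, _Np> return type.
    // Illustrative mapping (assuming IEC 559 double):
    //   0.0 -> FP_ZERO, 1e-320 -> FP_SUBNORMAL, 1.0 -> FP_NORMAL,
    //   1/0. -> FP_INFINITE, 0/0. -> FP_NAN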
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static __fixed_size_storage_t<int, _Np>
      _S_fpclassify(_SimdWrapper<_Tp, _Np> __x)
      {
        using _I = __int_for_sizeof_t<_Tp>;
        const auto __xn
          = __vector_bitcast<_I>(__to_intrin(_SuperImpl::_S_abs(__x)));
        constexpr size_t _NI = sizeof(__xn) / sizeof(_I);
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __minn
          = __vector_bitcast<_I>(__vector_broadcast<_NI>(__norm_min_v<_Tp>));

        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_normal
          = __vector_broadcast<_NI, _I>(FP_NORMAL);
#if !__FINITE_MATH_ONLY__
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __infn
          = __vector_bitcast<_I>(__vector_broadcast<_NI>(__infinity_v<_Tp>));
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_nan
          = __vector_broadcast<_NI, _I>(FP_NAN);
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_infinite
          = __vector_broadcast<_NI, _I>(FP_INFINITE);
#endif
#ifndef __FAST_MATH__
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_subnormal
          = __vector_broadcast<_NI, _I>(FP_SUBNORMAL);
#endif
        _GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_zero
          = __vector_broadcast<_NI, _I>(FP_ZERO);

        __vector_type_t<_I, _NI>
          __tmp = __xn < __minn
#ifdef __FAST_MATH__
                    ? __fp_zero
#else
                    ? (__xn == 0 ? __fp_zero : __fp_subnormal)
#endif
#if __FINITE_MATH_ONLY__
                    : __fp_normal;
#else
                    : (__xn < __infn
                         ? __fp_normal
                         : (__xn == __infn ? __fp_infinite : __fp_nan));
#endif

        if constexpr (sizeof(_I) == sizeof(int))
          {
            using _FixedInt = __fixed_size_storage_t<int, _Np>;
            const auto __as_int = __vector_bitcast<int, _Np>(__tmp);
            if constexpr (_FixedInt::_S_tuple_size == 1)
              return {__as_int};
            else if constexpr (_FixedInt::_S_tuple_size == 2
                                 && is_same_v<
                                      typename _FixedInt::_SecondType::_FirstAbi,
                                      simd_abi::scalar>)
              return {__extract<0, 2>(__as_int), __as_int[_Np - 1]};
            else if constexpr (_FixedInt::_S_tuple_size == 2)
              return {__extract<0, 2>(__as_int),
                      __auto_bitcast(__extract<1, 2>(__as_int))};
            else
              __assert_unreachable<_Tp>();
          }
        else if constexpr (_Np == 2 && sizeof(_I) == 8
                             && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 2)
          {
            const auto __aslong = __vector_bitcast<_LLong>(__tmp);
            return {int(__aslong[0]), {int(__aslong[1])}};
          }
#if _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 32
                             && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
          return {_mm_packs_epi32(__to_intrin(__lo128(__tmp)),
                                  __to_intrin(__hi128(__tmp)))};
        else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 64
                             && __fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
          return {_mm512_cvtepi64_epi32(__to_intrin(__tmp))};
#endif // _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (__fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
          return {__call_with_subscripts<_Np>(
                    __vector_bitcast<_LLong>(__tmp),
                    [](auto... __l) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                      return __make_wrapper<int>(__l...);
                    })};
        else
          __assert_unreachable<_Tp>();
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_increment(_SimdWrapper<_Tp, _Np>& __x)
      { __x = __x._M_data + 1; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_decrement(_SimdWrapper<_Tp, _Np>& __x)
      { __x = __x._M_data - 1; }

    template <typename _Tp, size_t _Np, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_set(_SimdWrapper<_Tp, _Np>& __v, int __i, _Up&& __x) noexcept
      { __v._M_set(__i, static_cast<_Up&&>(__x)); }
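    // The masked assignment helpers below first check whether the mask is
    // known at compile time (constant-propagated): an all-false mask is a
    // no-op, an all-true mask is a plain assignment, and only a runtime mask
    // needs an actual blend.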
    template <typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_assign(_SimdWrapper<_K, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
                       __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs)
      {
        if (__k._M_is_constprop_none_of())
          return;
        else if (__k._M_is_constprop_all_of())
          __lhs = __rhs;
        else
          __lhs = _CommonImpl::_S_blend(__k, __lhs, __rhs);
      }

    template <typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_assign(_SimdWrapper<_K, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
                       __type_identity_t<_Tp> __rhs)
      {
        if (__k._M_is_constprop_none_of())
          return;
        else if (__k._M_is_constprop_all_of())
          __lhs = __vector_broadcast<_Np>(__rhs);
        else if (__builtin_constant_p(__rhs) && __rhs == 0)
          {
            if constexpr (!is_same_v<bool, _K>)
              // the __andnot optimization only makes sense if __k._M_data is
              // a vector register
              __lhs._M_data
                = __andnot(__vector_bitcast<_Tp>(__k), __lhs._M_data);
            else
              // for bitmasks a masked zero-move is best
              __lhs
                = _CommonImpl::_S_blend(__k, __lhs, _SimdWrapper<_Tp, _Np>());
          }
        else
          __lhs = _CommonImpl::_S_blend(__k, __lhs,
                                        _SimdWrapper<_Tp, _Np>(
                                          __vector_broadcast<_Np>(__rhs)));
      }
    template <typename _Op, typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_cassign(const _SimdWrapper<_K, _Np> __k,
                        _SimdWrapper<_Tp, _Np>& __lhs,
                        const __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs,
                        _Op __op)
      {
        if (__k._M_is_constprop_none_of())
          return;
        else if (__k._M_is_constprop_all_of())
          __lhs = __op(_SuperImpl{}, __lhs, __rhs);
        else
          __lhs = _CommonImpl::_S_blend(__k, __lhs,
                                        __op(_SuperImpl{}, __lhs, __rhs));
      }

    template <typename _Op, typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_masked_cassign(const _SimdWrapper<_K, _Np> __k,
                        _SimdWrapper<_Tp, _Np>& __lhs,
                        const __type_identity_t<_Tp> __rhs, _Op __op)
      { _S_masked_cassign(__k, __lhs, __vector_broadcast<_Np>(__rhs), __op); }
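    // _S_masked_unary exploits the vector-mask representation (-1 for true,
    // 0 for false): a masked increment of an integer vector is simply
    // __v - __k, because subtracting -1 adds 1 exactly in the true lanes.
    // Illustrative example: __v = {1, 2}, __k = {-1, 0} -> __v - __k = {2, 2}.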
    template <template <typename> class _Op, typename _Tp, typename _K,
              size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_masked_unary(const _SimdWrapper<_K, _Np> __k,
                      const _SimdWrapper<_Tp, _Np> __v)
      {
        if (__k._M_is_constprop_none_of())
          return __v;
        auto __vv = _M_make_simd(__v);
        _Op<decltype(__vv)> __op;
        if (__k._M_is_constprop_all_of())
          return __data(__op(__vv));
        else if constexpr (is_same_v<_Op<void>, __increment<void>>)
          {
            static_assert(not std::is_same_v<_K, bool>);
            if constexpr (is_integral_v<_Tp>)
              // shortcut: __k is an integer vector with values -1 or 0
              return __v._M_data - __vector_bitcast<_Tp>(__k._M_data);
            else if constexpr (not __have_avx2)
              return __v._M_data
                       + __vector_bitcast<_Tp>(
                           __k._M_data & __builtin_bit_cast(_K, _Tp(1)));
            // starting with AVX2 it is more efficient to blend after add
          }
        else if constexpr (is_same_v<_Op<void>, __decrement<void>>)
          {
            static_assert(not std::is_same_v<_K, bool>);
            if constexpr (is_integral_v<_Tp>)
              // shortcut: __k is an integer vector with values -1 or 0
              return __v._M_data + __vector_bitcast<_Tp>(__k._M_data);
            else if constexpr (not __have_avx2)
              return __v._M_data
                       - __vector_bitcast<_Tp>(
                           __k._M_data & __builtin_bit_cast(_K, _Tp(1)));
            // starting with AVX2 it is more efficient to blend after sub
          }
        return _CommonImpl::_S_blend(__k, __v, __data(__op(__vv)));
      }
  };
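// _MaskImplBuiltinMixin converts between the two mask representations used by
// the builtin ABIs: a bitmask (one bit per element, _BitMask /
// _SanitizedBitMask) and a vector mask (one all-ones or all-zeros element per
// lane).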
struct _MaskImplBuiltinMixin
{
  template <typename _Tp>
    using _TypeTag = _Tp*;

  template <typename _Up, size_t _ToN = 1>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
    _S_to_maskvector(bool __x)
    {
      static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
      return __x ? __vector_type_t<_Up, _ToN>{~_Up()}
                 : __vector_type_t<_Up, _ToN>{};
    }

  template <typename _Up, size_t _UpN = 0, size_t _Np, bool _Sanitized,
            size_t _ToN = _UpN == 0 ? _Np : _UpN>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
    _S_to_maskvector(_BitMask<_Np, _Sanitized> __x)
    {
      static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
      return __generate_vector<__vector_type_t<_Up, _ToN>>(
               [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                 if constexpr (__i < _Np)
                   return __x[__i] ? ~_Up() : _Up();
                 else
                   return _Up();
               });
    }
  template <typename _Up, size_t _UpN = 0, typename _Tp, size_t _Np,
            size_t _ToN = _UpN == 0 ? _Np : _UpN>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
    _S_to_maskvector(_SimdWrapper<_Tp, _Np> __x)
    {
      static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
      using _TW = _SimdWrapper<_Tp, _Np>;
      using _UW = _SimdWrapper<_Up, _ToN>;
      if constexpr (sizeof(_Up) == sizeof(_Tp) && sizeof(_TW) == sizeof(_UW))
        return __wrapper_bitcast<_Up, _ToN>(__x);
      else if constexpr (is_same_v<_Tp, bool>) // bits -> vector
        return _S_to_maskvector<_Up, _ToN>(_BitMask<_Np>(__x._M_data));
      else
        // vector -> vector, generic element-wise fallback
        return __generate_vector<__vector_type_t<_Up, _ToN>>(
                 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   if constexpr (__i < _Np)
                     return _Up(__x[__i.value]);
                   else
                     return _Up();
                 });
    }
  template <typename _Tp, size_t _Np>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
    _S_to_bits(_SimdWrapper<_Tp, _Np> __x)
    {
      static_assert(!is_same_v<_Tp, bool>);
      static_assert(_Np <= __CHAR_BIT__ * sizeof(_ULLong));
      using _Up = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
      const auto __bools
        = __vector_bitcast<_Up>(__x) >> (sizeof(_Up) * __CHAR_BIT__ - 1);
      _ULLong __r = 0;
      __execute_n_times<_Np>(
        [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
          __r |= _ULLong(__bools[__i.value]) << __i;
        });
      return __r;
    }
};
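// _MaskImplBuiltin provides the simd_mask operations (broadcast, load/store,
// conversions, logical/bitwise operators, and the horizontal queries all_of,
// any_of, popcount, ...) for ABI tags that store masks in vector registers.
// _SuperImpl (_Abi::_MaskImpl) lets derived, target-specific implementations
// override individual hooks.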
template <typename _Abi>
  struct _MaskImplBuiltin : _MaskImplBuiltinMixin
  {
    using _MaskImplBuiltinMixin::_S_to_bits;
    using _MaskImplBuiltinMixin::_S_to_maskvector;

    template <typename _Tp>
      using _SimdMember = typename _Abi::template __traits<_Tp>::_SimdMember;

    template <typename _Tp>
      using _MaskMember = typename _Abi::template _MaskMember<_Tp>;

    using _SuperImpl = typename _Abi::_MaskImpl;
    using _CommonImpl = typename _Abi::_CommonImpl;

    template <typename _Tp>
      static constexpr size_t _S_size = simd_size_v<_Tp, _Abi>;

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_broadcast(bool __x)
      {
        return __x ? _Abi::template _S_implicit_mask<_Tp>()
                   : _MaskMember<_Tp>();
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_load(const bool* __mem)
      {
        using _I = __int_for_sizeof_t<_Tp>;
        if (not __builtin_is_constant_evaluated())
          if constexpr (sizeof(_Tp) == sizeof(bool))
            {
              const auto __bools
                = _CommonImpl::template _S_load<_I, _S_size<_Tp>>(__mem);
              // bool is {0, 1}, everything else is UB
              return __bools > 0;
            }
        return __generate_vector<_I, _S_size<_Tp>>(
                 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                   return __mem[__i] ? ~_I() : _I();
                 });
      }
    template <typename _Tp, size_t _Np, bool _Sanitized>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_BitMask<_Np, _Sanitized> __x)
      {
        if constexpr (__is_builtin_bitmask_abi<_Abi>())
          return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(__x._M_to_bits());
        else
          return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
                                                       _S_size<_Tp>>(
                   __x._M_sanitized());
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_SimdWrapper<bool, _Np> __x)
      {
        if constexpr (__is_builtin_bitmask_abi<_Abi>())
          return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(__x._M_data);
        else
          return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
                                                       _S_size<_Tp>>(
                   _BitMask<_Np>(__x._M_data)._M_sanitized());
      }

    template <typename _Tp, typename _Up, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_SimdWrapper<_Up, _Np> __x)
      {
        if constexpr (__is_builtin_bitmask_abi<_Abi>())
          return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(
                   _SuperImpl::_S_to_bits(__x));
        else
          return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
                                                       _S_size<_Tp>>(__x);
      }

    template <typename _Tp, typename _Up, typename _UAbi>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(simd_mask<_Up, _UAbi> __x)
      {
        if constexpr (__is_builtin_bitmask_abi<_Abi>())
          {
            using _R = _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>;
            if constexpr (__is_builtin_bitmask_abi<_UAbi>()) // bits -> bits
              return _R(__data(__x));
            else if constexpr (__is_scalar_abi<_UAbi>()) // bool -> bits
              return _R(__data(__x));
            else if constexpr (__is_fixed_size_abi_v<_UAbi>) // bitset -> bits
              return _R(__data(__x)._M_to_bits());
            else // vector -> bits
              return _R(_UAbi::_MaskImpl::_S_to_bits(__data(__x))._M_to_bits());
          }
        else
          return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
                                                       _S_size<_Tp>>(
                   __data(__x));
      }
    template <typename _Tp, size_t _Np>
      static inline _SimdWrapper<_Tp, _Np>
      _S_masked_load(_SimdWrapper<_Tp, _Np> __merge,
                     _SimdWrapper<_Tp, _Np> __mask, const bool* __mem) noexcept
      {
        auto __tmp = __wrapper_bitcast<__int_for_sizeof_t<_Tp>>(__merge);
        _BitOps::_S_bit_iteration(
          _SuperImpl::_S_to_bits(__mask),
          [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __tmp._M_set(__i, -__mem[__i]);
          });
        __merge = __wrapper_bitcast<_Tp>(__tmp);
        return __merge;
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr void
      _S_store(_SimdWrapper<_Tp, _Np> __v, bool* __mem) noexcept
      {
        __execute_n_times<_Np>(
          [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __mem[__i] = __v[__i];
          });
      }

    template <typename _Tp, size_t _Np>
      static inline void
      _S_masked_store(const _SimdWrapper<_Tp, _Np> __v, bool* __mem,
                      const _SimdWrapper<_Tp, _Np> __k) noexcept
      {
        _BitOps::_S_bit_iteration(
          _SuperImpl::_S_to_bits(__k),
          [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
            __mem[__i] = __v[__i];
          });
      }

    template <size_t _Np, typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_from_bitmask(_SanitizedBitMask<_Np> __bits, _TypeTag<_Tp>)
      {
        return _SuperImpl::template _S_to_maskvector<_Tp, _S_size<_Tp>>(
                 __bits);
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_logical_and(const _SimdWrapper<_Tp, _Np>& __x,
                     const _SimdWrapper<_Tp, _Np>& __y)
      { return __and(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_logical_or(const _SimdWrapper<_Tp, _Np>& __x,
                    const _SimdWrapper<_Tp, _Np>& __y)
      { return __or(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_not(const _SimdWrapper<_Tp, _Np>& __x)
      {
        if constexpr (_Abi::template _S_is_partial<_Tp>)
          return __andnot(__x, __wrapper_bitcast<_Tp>(
                            _Abi::template _S_implicit_mask<_Tp>()));
        else
          return __not(__x._M_data);
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_and(const _SimdWrapper<_Tp, _Np>& __x,
                 const _SimdWrapper<_Tp, _Np>& __y)
      { return __and(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_or(const _SimdWrapper<_Tp, _Np>& __x,
                const _SimdWrapper<_Tp, _Np>& __y)
      { return __or(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_xor(const _SimdWrapper<_Tp, _Np>& __x,
                 const _SimdWrapper<_Tp, _Np>& __y)
      { return __xor(__x._M_data, __y._M_data); }
    template <typename _Tp, size_t _Np>
      static constexpr void
      _S_set(_SimdWrapper<_Tp, _Np>& __k, int __i, bool __x) noexcept
      {
        if constexpr (is_same_v<_Tp, bool>)
          __k._M_set(__i, __x);
        else
          {
            static_assert(is_same_v<_Tp, __int_for_sizeof_t<_Tp>>);
            if (__builtin_is_constant_evaluated())
              {
                __k = __generate_from_n_evaluations<_Np,
                                                    __vector_type_t<_Tp, _Np>>(
                        [&](auto __j) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                          if (__i == static_cast<int>(__j))
                            return _Tp(-__x);
                          else
                            return __k[+__j];
                        });
              }
            else
              __k._M_data[__i] = -__x;
          }
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_assign(_SimdWrapper<_Tp, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
                       __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs)
      { __lhs = _CommonImpl::_S_blend(__k, __lhs, __rhs); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_assign(_SimdWrapper<_Tp, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
                       bool __rhs)
      {
        if (__builtin_constant_p(__rhs))
          {
            if (__rhs == false)
              __lhs = __andnot(__k, __lhs);
            else
              __lhs = __or(__k, __lhs);
            return;
          }
        __lhs = _CommonImpl::_S_blend(__k, __lhs,
                                      __data(simd_mask<_Tp, _Abi>(__rhs)));
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_all_of(simd_mask<_Tp, _Abi> __k)
      {
        return __call_with_subscripts(
                 __data(__k), make_index_sequence<_S_size<_Tp>>(),
                 [](const auto... __ent) constexpr
                   _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
                 { return (... && !(__ent == 0)); });
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_any_of(simd_mask<_Tp, _Abi> __k)
      {
        return __call_with_subscripts(
                 __data(__k), make_index_sequence<_S_size<_Tp>>(),
                 [](const auto... __ent) constexpr
                   _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
                 { return (... || !(__ent == 0)); });
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_none_of(simd_mask<_Tp, _Abi> __k)
      {
        return __call_with_subscripts(
                 __data(__k), make_index_sequence<_S_size<_Tp>>(),
                 [](const auto... __ent) constexpr
                   _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
                 { return (... && (__ent == 0)); });
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_some_of(simd_mask<_Tp, _Abi> __k)
      {
        const int __n_true = _SuperImpl::_S_popcount(__k);
        return __n_true > 0 && __n_true < int(_S_size<_Tp>);
      }
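    // Since vector-mask lanes hold -1 (true) or 0 (false), summing the lanes
    // as integers yields -popcount; hence the negated reduce below.
    // Illustrative example: lanes {-1, 0, -1, -1} -> sum = -3 -> popcount 3.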
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_popcount(simd_mask<_Tp, _Abi> __k)
      {
        using _I = __int_for_sizeof_t<_Tp>;
        if constexpr (is_default_constructible_v<simd<_I, _Abi>>)
          return -reduce(
                   simd<_I, _Abi>(__private_init,
                                  __wrapper_bitcast<_I>(__data(__k))));
        else
          return -reduce(__bit_cast<rebind_simd_t<_I, simd<_Tp, _Abi>>>(
                   simd<_Tp, _Abi>(__private_init, __data(__k))));
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_find_first_set(simd_mask<_Tp, _Abi> __k)
      {
        return std::__countr_zero(
                 _SuperImpl::_S_to_bits(__data(__k))._M_to_bits());
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_find_last_set(simd_mask<_Tp, _Abi> __k)
      {
        return std::__bit_width(
                 _SuperImpl::_S_to_bits(__data(__k))._M_to_bits()) - 1;
      }
  };

_GLIBCXX_SIMD_END_NAMESPACE
#endif // __cplusplus >= 201703L
#endif // _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_