39#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_ 
   40#define _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_ 
   42#if __cplusplus >= 201703L 
   46_GLIBCXX_SIMD_BEGIN_NAMESPACE
 
   49template <
size_t _I, 
typename _Tp>
 
   50  struct __simd_tuple_element;
 
   52template <
typename _Tp, 
typename _A0, 
typename... _As>
 
   53  struct __simd_tuple_element<0, _SimdTuple<_Tp, _A0, _As...>>
 
   54  { 
using type = simd<_Tp, _A0>; };
 
   56template <
size_t _I, 
typename _Tp, 
typename _A0, 
typename... _As>
 
   57  struct __simd_tuple_element<_I, _SimdTuple<_Tp, _A0, _As...>>
 
   58  { 
using type = 
typename __simd_tuple_element<_I - 1, _SimdTuple<_Tp, _As...>>::type; };
 
   60template <
size_t _I, 
typename _Tp>
 
   61  using __simd_tuple_element_t = 
typename __simd_tuple_element<_I, _Tp>::type;
 
   66template <
typename _Tp, 
typename... _A0s, 
typename... _A1s>
 
   67  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp, _A0s..., _A1s...>
 
   68  __simd_tuple_concat(
const _SimdTuple<_Tp, _A0s...>& __left,
 
   69                      const _SimdTuple<_Tp, _A1s...>& __right)
 
   71    if constexpr (
sizeof...(_A0s) == 0)
 
   73    else if constexpr (
sizeof...(_A1s) == 0)
 
   76      return {__left.first, __simd_tuple_concat(__left.second, __right)};
 
   79template <
typename _Tp, 
typename _A10, 
typename... _A1s>
 
   80  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp, simd_abi::scalar, _A10, _A1s...>
 
   81  __simd_tuple_concat(
const _Tp& __left, 
const _SimdTuple<_Tp, _A10, _A1s...>& __right)
 
   82  { 
return {__left, __right}; }
 
   88template <
size_t _Np, 
typename _Tp>
 
   89  _GLIBCXX_SIMD_INTRINSIC 
constexpr decltype(
auto)
 
   90  __simd_tuple_pop_front(_Tp&& __x)
 
   92    if constexpr (_Np == 0)
 
   93      return static_cast<_Tp&&
>(__x);
 
   96        using _Up = __remove_cvref_t<_Tp>;
 
   97        static_assert(_Np >= _Up::_S_first_size);
 
   98        return __simd_tuple_pop_front<_Np - _Up::_S_first_size>(__x.second);
 
  106struct __as_simd_tuple {};
 
  108template <
typename _Tp, 
typename _A0, 
typename... _Abis>
 
  109  _GLIBCXX_SIMD_INTRINSIC 
constexpr simd<_Tp, _A0>
 
  110  __simd_tuple_get_impl(__as_simd, 
const _SimdTuple<_Tp, _A0, _Abis...>& __t, _SizeConstant<0>)
 
  111  { 
return {__private_init, __t.first}; }
 
  113template <
typename _Tp, 
typename _A0, 
typename... _Abis>
 
  114  _GLIBCXX_SIMD_INTRINSIC 
constexpr const auto&
 
  115  __simd_tuple_get_impl(__as_simd_tuple, 
const _SimdTuple<_Tp, _A0, _Abis...>& __t,
 
  117  { 
return __t.first; }
 
  119template <
typename _Tp, 
typename _A0, 
typename... _Abis>
 
  120  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  121  __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _A0, _Abis...>& __t, _SizeConstant<0>)
 
  122  { 
return __t.first; }
 
  124template <
typename _R, 
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  125  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  126  __simd_tuple_get_impl(_R, 
const _SimdTuple<_Tp, _Abis...>& __t, _SizeConstant<_Np>)
 
  127  { 
return __simd_tuple_get_impl(_R(), __t.second, _SizeConstant<_Np - 1>()); }
 
  129template <
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  130  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  131  __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _Abis...>& __t, _SizeConstant<_Np>)
 
  132  { 
return __simd_tuple_get_impl(__as_simd_tuple(), __t.second, _SizeConstant<_Np - 1>()); }
 
  134template <
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  135  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  136  __get_simd_at(
const _SimdTuple<_Tp, _Abis...>& __t)
 
  137  { 
return __simd_tuple_get_impl(__as_simd(), __t, _SizeConstant<_Np>()); }
 
  141template <
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  142  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  143  __get_tuple_at(
const _SimdTuple<_Tp, _Abis...>& __t)
 
  144  { 
return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>()); }
 
  146template <
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  147  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  148  __get_tuple_at(_SimdTuple<_Tp, _Abis...>& __t)
 
  149  { 
return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>()); }
 
  152template <
typename _Tp, 
typename _Abi, 
size_t _Offset>
 
  153  struct __tuple_element_meta : 
public _Abi::_SimdImpl
 
  155    static_assert(is_same_v<
typename _Abi::_SimdImpl::abi_type,
 
  158    using value_type = _Tp;
 
  159    using abi_type = _Abi;
 
  160    using _Traits = _SimdTraits<_Tp, _Abi>;
 
  161    using _MaskImpl = 
typename _Abi::_MaskImpl;
 
  162    using _MaskMember = 
typename _Traits::_MaskMember;
 
  163    using simd_type = simd<_Tp, _Abi>;
 
  164    static constexpr size_t _S_offset = _Offset;
 
  165    static constexpr size_t _S_size() { 
return simd_size<_Tp, _Abi>::value; }
 
  166    static constexpr _MaskImpl _S_mask_impl = {};
 
  168    template <
size_t _Np, 
bool _Sanitized>
 
  169      _GLIBCXX_SIMD_INTRINSIC 
static constexpr auto 
  170      _S_submask(_BitMask<_Np, _Sanitized> __bits)
 
  171      { 
return __bits.template _M_extract<_Offset, _S_size()>(); }
 
  173    template <
size_t _Np, 
bool _Sanitized>
 
  174      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
  175      _S_make_mask(_BitMask<_Np, _Sanitized> __bits)
 
  177        return _MaskImpl::template _S_convert<_Tp>(
 
  178          __bits.template _M_extract<_Offset, _S_size()>()._M_sanitized());
 
  181    _GLIBCXX_SIMD_INTRINSIC 
static constexpr _ULLong
 
  182    _S_mask_to_shifted_ullong(_MaskMember __k)
 
  183    { 
return _MaskImpl::_S_to_bits(__k).to_ullong() << _Offset; }
 
  186template <
size_t _Offset, 
typename _Tp, 
typename _Abi, 
typename... _As>
 
  188  __tuple_element_meta<_Tp, _Abi, _Offset>
 
  189  __make_meta(
const _SimdTuple<_Tp, _Abi, _As...>&)
 
  194template <
size_t _Offset, 
typename _Base>
 
  195  struct _WithOffset : 
public _Base
 
  197    static inline constexpr size_t _S_offset = _Offset;
 
  199    _GLIBCXX_SIMD_INTRINSIC 
char*
 
  201    { 
return reinterpret_cast<char*
>(
this) + _S_offset * 
sizeof(
typename _Base::value_type); }
 
  203    _GLIBCXX_SIMD_INTRINSIC 
const char*
 
  204    _M_as_charptr()
 const 
  205    { 
return reinterpret_cast<const char*
>(
this) + _S_offset * 
sizeof(
typename _Base::value_type); }
 
  209template <
size_t _O0, 
size_t _O1, 
typename _Base>
 
  210  struct _WithOffset<_O0, _WithOffset<_O1, _Base>> {};
 
  212template <
size_t _Offset, 
typename _Tp>
 
  214  __add_offset(_Tp& __base)
 
  215  { 
return static_cast<_WithOffset<_Offset, __remove_cvref_t<_Tp>
>&>(
__base); }
 
  217template <
size_t _Offset, 
typename _Tp>
 
  219  __add_offset(
const _Tp& __base)
 
  220  { 
return static_cast<const _WithOffset<_Offset, __remove_cvref_t<_Tp>
>&>(
__base); }
 
  222template <
size_t _Offset, 
size_t _ExistingOffset, 
typename _Tp>
 
  224  __add_offset(_WithOffset<_ExistingOffset, _Tp>& __base)
 
  225  { 
return static_cast<_WithOffset<_Offset + _ExistingOffset, _Tp>&
>(
static_cast<_Tp&
>(
__base)); }
 
  227template <
size_t _Offset, 
size_t _ExistingOffset, 
typename _Tp>
 
  229  __add_offset(
const _WithOffset<_ExistingOffset, _Tp>& __base)
 
  231    return static_cast<const _WithOffset<_Offset + _ExistingOffset, _Tp>&
>(
 
  232      static_cast<const _Tp&
>(
__base));
 
  235template <
typename _Tp>
 
  236  constexpr inline size_t __offset = 0;
 
  238template <
size_t _Offset, 
typename _Tp>
 
  239  constexpr inline size_t __offset<_WithOffset<_Offset, _Tp>>
 
  240    = _WithOffset<_Offset, _Tp>::_S_offset;
 
  242template <
typename _Tp>
 
  243  constexpr inline size_t __offset<const _Tp> = __offset<_Tp>;
 
  245template <
typename _Tp>
 
  246  constexpr inline size_t __offset<_Tp&> = __offset<_Tp>;
 
  248template <
typename _Tp>
 
  249  constexpr inline size_t __offset<_Tp&&> = __offset<_Tp>;
 
  254template <
typename _Tp>
 
  255  struct _SimdTuple<_Tp>
 
  257    using value_type = _Tp;
 
  258    static constexpr size_t _S_tuple_size = 0;
 
  259    static constexpr size_t _S_size() { 
return 0; }
 
  263template <
typename _FirstType, 
typename _SecondType>
 
  264  struct _SimdTupleData
 
  269    _GLIBCXX_SIMD_INTRINSIC
 
  271    _M_is_constprop()
 const 
  273      if constexpr (is_class_v<_FirstType>)
 
  274        return first._M_is_constprop() && second._M_is_constprop();
 
  276        return __builtin_constant_p(first) && second._M_is_constprop();
 
  280template <
typename _FirstType, 
typename _Tp>
 
  281  struct _SimdTupleData<_FirstType, _SimdTuple<_Tp>>
 
  284    static constexpr _SimdTuple<_Tp> second = {};
 
  286    _GLIBCXX_SIMD_INTRINSIC
 
  288    _M_is_constprop()
 const 
  290      if constexpr (is_class_v<_FirstType>)
 
  291        return first._M_is_constprop();
 
  293        return __builtin_constant_p(first);
 
  298template <
typename _Tp, 
typename _Abi0, 
typename... _Abis>
 
  299  struct _SimdTuple<_Tp, _Abi0, _Abis...>
 
  300    : _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
 
  301                     _SimdTuple<_Tp, _Abis...>>
 
  303    static_assert(!__is_fixed_size_abi_v<_Abi0>);
 
  304    using value_type = _Tp;
 
  305    using _FirstType = 
typename _SimdTraits<_Tp, _Abi0>::_SimdMember;
 
  306    using _FirstAbi = _Abi0;
 
  307    using _SecondType = _SimdTuple<_Tp, _Abis...>;
 
  308    static constexpr size_t _S_tuple_size = 
sizeof...(_Abis) + 1;
 
  310    static constexpr size_t _S_size()
 
  311    { 
return simd_size_v<_Tp, _Abi0> + _SecondType::_S_size(); }
 
  313    static constexpr size_t _S_first_size = simd_size_v<_Tp, _Abi0>;
 
  314    static constexpr bool _S_is_homogeneous = (is_same_v<_Abi0, _Abis> && ...);
 
  316    using _Base = _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
 
  317                                 _SimdTuple<_Tp, _Abis...>>;
 
  321    _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple() = 
default;
 
  322    _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple(
const _SimdTuple&) = 
default;
 
  323    _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple& operator=(
const _SimdTuple&)
 
  326    template <
typename _Up>
 
  327      _GLIBCXX_SIMD_INTRINSIC 
constexpr 
  328      _SimdTuple(_Up&& __x)
 
  329      : _Base{static_cast<_Up&&>(__x)} {}
 
  331    template <
typename _Up, 
typename _Up2>
 
  332      _GLIBCXX_SIMD_INTRINSIC 
constexpr 
  333      _SimdTuple(_Up&& __x, _Up2&& __y)
 
  334      : _Base{static_cast<_Up&&>(__x), static_cast<_Up2&&>(__y)} {}
 
  336    template <
typename _Up>
 
  337      _GLIBCXX_SIMD_INTRINSIC 
constexpr 
  338      _SimdTuple(_Up&& __x, _SimdTuple<_Tp>)
 
  339      : _Base{static_cast<_Up&&>(__x)} {}
 
  341    _GLIBCXX_SIMD_INTRINSIC 
char*
 
  343    { 
return reinterpret_cast<char*
>(
this); }
 
  345    _GLIBCXX_SIMD_INTRINSIC 
const char*
 
  346    _M_as_charptr()
 const 
  347    { 
return reinterpret_cast<const char*
>(
this); }
 
  349    template <
size_t _Np>
 
  350      _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  353        if constexpr (_Np == 0)
 
  356          return second.template _M_at<_Np - 1>();
 
  359    template <
size_t _Np>
 
  360      _GLIBCXX_SIMD_INTRINSIC 
constexpr const auto&
 
  363        if constexpr (_Np == 0)
 
  366          return second.template _M_at<_Np - 1>();
 
  369    template <
size_t _Np>
 
  370      _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  373        if constexpr (_Np == 0)
 
  374          return simd<_Tp, _Abi0>(__private_init, first);
 
  376          return second.template _M_simd_at<_Np - 1>();
 
  379    template <
size_t _Offset = 0, 
typename _Fp>
 
  380      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple
 
  381      _S_generate(_Fp&& __gen, _SizeConstant<_Offset> = {})
 
  383        auto&& __first = __gen(__tuple_element_meta<_Tp, _Abi0, _Offset>());
 
  384        if constexpr (_S_tuple_size == 1)
 
  388                  _SecondType::_S_generate(
 
  389                    static_cast<_Fp&&
>(__gen),
 
  390                    _SizeConstant<_Offset + simd_size_v<_Tp, _Abi0>>())};
 
  393    template <
size_t _Offset = 0, 
typename _Fp, 
typename... _More>
 
  394      _GLIBCXX_SIMD_INTRINSIC _SimdTuple
 
  395      _M_apply_wrapped(_Fp&& __fun, 
const _More&... __more)
 const 
  398          = __fun(__make_meta<_Offset>(*
this), first, __more.first...);
 
  399        if constexpr (_S_tuple_size == 1)
 
  404            second.template _M_apply_wrapped<_Offset + simd_size_v<_Tp, _Abi0>>(
 
  405              static_cast<_Fp&&
>(__fun), __more.second...)};
 
  408    template <
typename _Tup>
 
  409      _GLIBCXX_SIMD_INTRINSIC 
constexpr decltype(
auto)
 
  410      _M_extract_argument(_Tup&& __tup)
 const 
  412        using _TupT = 
typename __remove_cvref_t<_Tup>::value_type;
 
  413        if constexpr (is_same_v<_SimdTuple, __remove_cvref_t<_Tup>>)
 
  415        else if (__builtin_is_constant_evaluated())
 
  416          return __fixed_size_storage_t<_TupT, _S_first_size>::_S_generate(
 
  417                   [&](
auto __meta) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  418                     return __meta._S_generator(
 
  419                              [&](
auto __i) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  421                              }, 
static_cast<_TupT*
>(
nullptr));
 
  425            __fixed_size_storage_t<_TupT, _S_first_size> __r;
 
  426            __builtin_memcpy(__r._M_as_charptr(), __tup._M_as_charptr(),
 
  432    template <
typename _Tup>
 
  433      _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  434      _M_skip_argument(_Tup&& __tup)
 const 
  436        static_assert(_S_tuple_size > 1);
 
  437        using _Up = __remove_cvref_t<_Tup>;
 
  438        constexpr size_t __off = __offset<_Up>;
 
  439        if constexpr (_S_first_size == _Up::_S_first_size && __off == 0)
 
  441        else if constexpr (_S_first_size > _Up::_S_first_size
 
  442                           && _S_first_size % _Up::_S_first_size == 0
 
  444          return __simd_tuple_pop_front<_S_first_size>(__tup);
 
  445        else if constexpr (_S_first_size + __off < _Up::_S_first_size)
 
  446          return __add_offset<_S_first_size>(__tup);
 
  447        else if constexpr (_S_first_size + __off == _Up::_S_first_size)
 
  450          __assert_unreachable<_Tup>();
 
  453    template <
size_t _Offset, 
typename... _More>
 
  454      _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  455      _M_assign_front(
const _SimdTuple<_Tp, _Abi0, _More...>& __x) &
 
  457        static_assert(_Offset == 0);
 
  459        if constexpr (
sizeof...(_More) > 0)
 
  461            static_assert(
sizeof...(_Abis) >= 
sizeof...(_More));
 
  462            second.template _M_assign_front<0>(__x.second);
 
  466    template <
size_t _Offset>
 
  467      _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  468      _M_assign_front(
const _FirstType& __x) &
 
  470        static_assert(_Offset == 0);
 
  474    template <
size_t _Offset, 
typename... _As>
 
  475      _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  476      _M_assign_front(
const _SimdTuple<_Tp, _As...>& __x) &
 
  478        __builtin_memcpy(_M_as_charptr() + _Offset * 
sizeof(value_type),
 
  480                         sizeof(_Tp) * _SimdTuple<_Tp, _As...>::_S_size());
 
  488    template <
typename _Fp, 
typename... _More>
 
  489      _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple
 
  490      _M_apply_per_chunk(_Fp&& __fun, _More&&... __more)
 const 
  494                         is_lvalue_reference<_More>,
 
  495                         negation<is_const<remove_reference_t<_More>>>>) )
 
  498            auto&& __first = [&](
auto... __args) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  499              auto __r = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
 
  501              [[maybe_unused]] 
auto&& __ignore_me = {(
 
  502                [](
auto&& __dst, 
const auto& __src) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  503                  if constexpr (is_assignable_v<
decltype(__dst),
 
  506                      __dst.template _M_assign_front<__offset<
decltype(__dst)>>(
 
  509                }(
static_cast<_More&&
>(__more), __args),
 
  512            }(_M_extract_argument(__more)...);
 
  513            if constexpr (_S_tuple_size == 1)
 
  517                      second._M_apply_per_chunk(
static_cast<_Fp&&
>(__fun),
 
  518                                                _M_skip_argument(__more)...)};
 
  522            auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
 
  523                                   _M_extract_argument(__more)...);
 
  524            if constexpr (_S_tuple_size == 1)
 
  528                      second._M_apply_per_chunk(
static_cast<_Fp&&
>(__fun),
 
  529                                                _M_skip_argument(__more)...)};
 
  533    template <
typename _R = _Tp, 
typename _Fp, 
typename... _More>
 
  534      _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  535      _M_apply_r(_Fp&& __fun, 
const _More&... __more)
 const 
  537        auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
 
  539        if constexpr (_S_tuple_size == 1)
 
  542          return __simd_tuple_concat<_R>(
 
  543            __first, second.template _M_apply_r<_R>(
static_cast<_Fp&&
>(__fun),
 
  547    template <
typename _Fp, 
typename... _More>
 
  548      _GLIBCXX_SIMD_INTRINSIC 
constexpr friend _SanitizedBitMask<_S_size()>
 
  549      _M_test(
const _Fp& __fun, 
const _SimdTuple& __x, 
const _More&... __more)
 
  551        const _SanitizedBitMask<_S_first_size> __first
 
  552          = _Abi0::_MaskImpl::_S_to_bits(
 
  553            __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), __x.first,
 
  555        if constexpr (_S_tuple_size == 1)
 
  558          return _M_test(__fun, __x.second, __more.second...)
 
  559            ._M_prepend(__first);
 
  562    template <
typename _Up, _Up _I>
 
  563      _GLIBCXX_SIMD_INTRINSIC 
constexpr _Tp
 
  564      operator[](integral_constant<_Up, _I>) 
const noexcept 
  566        if constexpr (_I < simd_size_v<_Tp, _Abi0>)
 
  567          return _M_subscript_read(_I);
 
  569          return second[integral_constant<_Up, _I - simd_size_v<_Tp, _Abi0>>()];
 
  573    operator[](
size_t __i) 
const noexcept 
  575      if constexpr (_S_tuple_size == 1)
 
  576        return _M_subscript_read(__i);
 
  577#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS 
  578      else if (not __builtin_is_constant_evaluated())
 
  579        return reinterpret_cast<const __may_alias<_Tp>*
>(
this)[__i];
 
  581      else if constexpr (__is_scalar_abi<_Abi0>())
 
  583          const _Tp* ptr = &first;
 
  587        return __i < simd_size_v<_Tp, _Abi0> ? _M_subscript_read(__i)
 
  588                                             : second[__i - simd_size_v<_Tp, _Abi0>];
 
  592    _M_set(
size_t __i, _Tp __val) 
noexcept 
  594      if constexpr (_S_tuple_size == 1)
 
  595        return _M_subscript_write(__i, __val);
 
  596#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS 
  597      else if (not __builtin_is_constant_evaluated())
 
  598        reinterpret_cast<__may_alias<_Tp>*
>(
this)[__i] = __val;
 
  600      else if (__i < simd_size_v<_Tp, _Abi0>)
 
  601        _M_subscript_write(__i, __val);
 
  603        second._M_set(__i - simd_size_v<_Tp, _Abi0>, __val);
 
  609    _M_subscript_read([[maybe_unused]] 
size_t __i) 
const noexcept 
  611      if constexpr (__is_vectorizable_v<_FirstType>)
 
  618    _M_subscript_write([[maybe_unused]] 
size_t __i, _Tp __y) 
noexcept 
  620      if constexpr (__is_vectorizable_v<_FirstType>)
 
  623        first._M_set(__i, __y);
 
  630template <
typename _Tp, 
typename _A0>
 
  631  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp, _A0>
 
  632  __make_simd_tuple(simd<_Tp, _A0> __x0)
 
  633  { 
return {__data(__x0)}; }
 
  635template <
typename _Tp, 
typename _A0, 
typename... _As>
 
  636  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp, _A0, _As...>
 
  637  __make_simd_tuple(
const simd<_Tp, _A0>& __x0, 
const simd<_Tp, _As>&... __xs)
 
  638  { 
return {__data(__x0), __make_simd_tuple(__xs...)}; }
 
  640template <
typename _Tp, 
typename _A0>
 
  641  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp, _A0>
 
  642  __make_simd_tuple(
const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0)
 
  645template <
typename _Tp, 
typename _A0, 
typename _A1, 
typename... _Abis>
 
  646  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp, _A0, _A1, _Abis...>
 
  648    const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0,
 
  649    const typename _SimdTraits<_Tp, _A1>::_SimdMember& __arg1,
 
  650    const typename _SimdTraits<_Tp, _Abis>::_SimdMember&... __args)
 
  651  { 
return {__arg0, __make_simd_tuple<_Tp, _A1, _Abis...>(__arg1, __args...)}; }
 
  654template <
typename _Tp, 
size_t _Np, 
typename _V, 
size_t _NV, 
typename... _VX>
 
  655  _GLIBCXX_SIMD_INTRINSIC 
constexpr __fixed_size_storage_t<_Tp, _Np>
 
  656  __to_simd_tuple(
const array<_V, _NV>& __from, 
const _VX... __fromX);
 
  658template <
typename _Tp, 
size_t _Np,
 
  660          typename _R = __fixed_size_storage_t<_Tp, _Np>, 
typename _V0,
 
  661          typename _V0VT = _VectorTraits<_V0>, 
typename... _VX>
 
  662  _GLIBCXX_SIMD_INTRINSIC _R 
constexpr __to_simd_tuple(
const _V0 __from0, 
const _VX... __fromX)
 
  664    static_assert(is_same_v<typename _V0VT::value_type, _Tp>);
 
  665    static_assert(_Offset < _V0VT::_S_full_size);
 
  666    using _R0 = __vector_type_t<_Tp, _R::_S_first_size>;
 
  667    if constexpr (_R::_S_tuple_size == 1)
 
  669        if constexpr (_Np == 1)
 
  670          return _R{__from0[_Offset]};
 
  671        else if constexpr (_Offset == 0 && _V0VT::_S_full_size >= _Np)
 
  672          return _R{__intrin_bitcast<_R0>(__from0)};
 
  673        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
 
  674                           && _V0VT::_S_full_size / 2 >= _Np)
 
  675          return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0))};
 
  676        else if constexpr (_Offset * 4 == _V0VT::_S_full_size
 
  677                           && _V0VT::_S_full_size / 4 >= _Np)
 
  678          return _R{__intrin_bitcast<_R0>(__extract_part<1, 4>(__from0))};
 
  680          __assert_unreachable<_Tp>();
 
  684        if constexpr (1 == _R::_S_first_size)
 
  686            if constexpr (_Offset + 1 < _V0VT::_S_full_size)
 
  687              return _R{__from0[_Offset],
 
  688                        __to_simd_tuple<_Tp, _Np - 1, _Offset + 1>(__from0,
 
  691              return _R{__from0[_Offset],
 
  692                        __to_simd_tuple<_Tp, _Np - 1, 0>(__fromX...)};
 
  696        else if constexpr (_V0VT::_S_full_size == _R::_S_first_size
 
  699                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size>(__fromX...)};
 
  702        else if constexpr (_V0VT::_S_full_size > _R::_S_first_size
 
  704          return _R{__intrin_bitcast<_R0>(__from0),
 
  705                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
 
  706                                    _R::_S_first_size>(__from0, __fromX...)};
 
  710        else if constexpr (_Offset * 4 == _V0VT::_S_full_size
 
  711                           && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
 
  712          return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
 
  713                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
 
  714                                    _Offset + _R::_S_first_size>(__from0,
 
  719        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
 
  720                           && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
 
  721          return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
 
  722                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
 
  723                                    _Offset + _R::_S_first_size>(__from0,
 
  727        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
 
  728                           && _V0VT::_S_full_size / 2 >= _R::_S_first_size)
 
  729          return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0)),
 
  730                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size, 0>(
 
  735          __assert_unreachable<_Tp>();
 
  739template <
typename _Tp, 
size_t _Np, 
typename _V, 
size_t _NV, 
typename... _VX>
 
  740  _GLIBCXX_SIMD_INTRINSIC 
constexpr __fixed_size_storage_t<_Tp, _Np>
 
  741  __to_simd_tuple(
const array<_V, _NV>& __from, 
const _VX... __fromX)
 
  743    if constexpr (is_same_v<_Tp, _V>)
 
  747          "An array of scalars must be the last argument to __to_simd_tuple");
 
  748        return __call_with_subscripts(
 
  749                 __from, make_index_sequence<_NV>(),
 
  750                 [&](
const auto... __args) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  751                   return __simd_tuple_concat(
 
  752                            _SimdTuple<_Tp, simd_abi::scalar>{__args}..., _SimdTuple<_Tp>());
 
  756      return __call_with_subscripts(
 
  757               __from, make_index_sequence<_NV>(),
 
  758               [&](
const auto... __args) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  759                 return __to_simd_tuple<_Tp, _Np>(__args..., __fromX...);
 
  763template <
size_t, 
typename _Tp>
 
  764  using __to_tuple_helper = _Tp;
 
  766template <
typename _Tp, 
typename _A0, 
size_t _NOut, 
size_t _Np,
 
  768  _GLIBCXX_SIMD_INTRINSIC __fixed_size_storage_t<_Tp, _NOut>
 
  769  __to_simd_tuple_impl(index_sequence<_Indexes...>,
 
  770      const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
 
  772    return __make_simd_tuple<_Tp, __to_tuple_helper<_Indexes, _A0>...>(
 
  773      __args[_Indexes]...);
 
  776template <
typename _Tp, 
typename _A0, 
size_t _NOut, 
size_t _Np,
 
  777          typename _R = __fixed_size_storage_t<_Tp, _NOut>>
 
  778  _GLIBCXX_SIMD_INTRINSIC _R
 
  779  __to_simd_tuple_sized(
 
  780    const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
 
  782    static_assert(_Np * simd_size_v<_Tp, _A0> >= _NOut);
 
  783    return __to_simd_tuple_impl<_Tp, _A0, _NOut>(
 
  784      make_index_sequence<_R::_S_tuple_size>(), __args);
 
  788template <
typename _Tp>
 
  789  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp>
 
  790  __optimize_simd_tuple(
const _SimdTuple<_Tp>)
 
  793template <
typename _Tp, 
typename _Ap>
 
  794  _GLIBCXX_SIMD_INTRINSIC 
constexpr const _SimdTuple<_Tp, _Ap>&
 
  795  __optimize_simd_tuple(
const _SimdTuple<_Tp, _Ap>& __x)
 
  798template <
typename _Tp, 
typename _A0, 
typename _A1, 
typename... _Abis,
 
  799          typename _R = __fixed_size_storage_t<
 
  800            _Tp, _SimdTuple<_Tp, _A0, _A1, _Abis...>::_S_size()>>
 
  801  _GLIBCXX_SIMD_INTRINSIC 
constexpr _R
 
  802  __optimize_simd_tuple(
const _SimdTuple<_Tp, _A0, _A1, _Abis...>& __x)
 
  804    using _Tup = _SimdTuple<_Tp, _A0, _A1, _Abis...>;
 
  805    if constexpr (is_same_v<_R, _Tup>)
 
  807    else if constexpr (is_same_v<
typename _R::_FirstType,
 
  808                                 typename _Tup::_FirstType>)
 
  809      return {__x.first, __optimize_simd_tuple(__x.second)};
 
  810    else if constexpr (__is_scalar_abi<_A0>()
 
  811                       || _A0::template _S_is_partial<_Tp>)
 
  812      return {__generate_from_n_evaluations<_R::_S_first_size,
 
  813                                            typename _R::_FirstType>(
 
  814                [&](
auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { 
return __x[__i]; }),
 
  815              __optimize_simd_tuple(
 
  816                __simd_tuple_pop_front<_R::_S_first_size>(__x))};
 
  817    else if constexpr (is_same_v<_A0, _A1>
 
  818        && _R::_S_first_size == simd_size_v<_Tp, _A0> + simd_size_v<_Tp, _A1>)
 
  819      return {__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
 
  820              __optimize_simd_tuple(__x.second.second)};
 
  821    else if constexpr (
sizeof...(_Abis) >= 2
 
  822        && _R::_S_first_size == (4 * simd_size_v<_Tp, _A0>)
 
  823        && simd_size_v<_Tp, _A0> == __simd_tuple_element_t<
 
  824            (
sizeof...(_Abis) >= 2 ? 3 : 0), _Tup>::
size())
 
  826        __concat(__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
 
  827                 __concat(__x.template _M_at<2>(), __x.template _M_at<3>())),
 
  828        __optimize_simd_tuple(__x.second.second.second.second)};
 
  831        static_assert(
sizeof(_R) == 
sizeof(__x));
 
  833        __builtin_memcpy(__r._M_as_charptr(), __x._M_as_charptr(),
 
  834                         sizeof(_Tp) * _R::_S_size());
 
  840template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _Fp>
 
  841  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  842  __for_each(
const _SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
 
  843  { 
static_cast<_Fp&&
>(__fun)(__make_meta<_Offset>(__t), __t.first); }
 
  845template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _A1,
 
  846          typename... _As, 
typename _Fp>
 
  847  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  848  __for_each(
const _SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
 
  850    __fun(__make_meta<_Offset>(__t), __t.first);
 
  851    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
 
  852                                                     static_cast<_Fp&&
>(__fun));
 
  856template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _Fp>
 
  857  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  858  __for_each(_SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
 
  859  { 
static_cast<_Fp&&
>(__fun)(__make_meta<_Offset>(__t), __t.first); }
 
  861template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _A1,
 
  862          typename... _As, 
typename _Fp>
 
  863  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  864  __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
 
  866    __fun(__make_meta<_Offset>(__t), __t.first);
 
  867    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
 
  868                                                     static_cast<_Fp&&
>(__fun));
 
  872template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _Fp>
 
  873  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  874  __for_each(_SimdTuple<_Tp, _A0>& __a, 
const _SimdTuple<_Tp, _A0>& __b, _Fp&& __fun)
 
  875  { 
static_cast<_Fp&&
>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first); }
 
  877template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _A1,
 
  878          typename... _As, 
typename _Fp>
 
  879  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  880  __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __a,
 
  881             const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
 
  883    __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
 
  884    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
 
  885                                                     static_cast<_Fp&&
>(__fun));
 
  889template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _Fp>
 
  890  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  891  __for_each(
const _SimdTuple<_Tp, _A0>& __a, 
const _SimdTuple<_Tp, _A0>& __b, _Fp&& __fun)
 
  892  { 
static_cast<_Fp&&
>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first); }
 
  894template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _A1,
 
  895          typename... _As, 
typename _Fp>
 
  896  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  897  __for_each(
const _SimdTuple<_Tp, _A0, _A1, _As...>& __a,
 
  898             const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
 
  900    __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
 
  901    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
 
  902                                                     static_cast<_Fp&&
>(__fun));
 
  // __extract_part<_Index, _Total, _Combine>: view the _SimdTuple __x as
  // _Total equally-sized parts and return parts [_Index, _Index + _Combine)
  // as a single native vector (or scalar/tuple for the degenerate cases).
  // The cascade of if-constexpr branches picks the cheapest extraction:
  // whole-first-chunk reuse, bitcast of a prefix, recursion into the tail,
  // element-wise gather across a chunk boundary, or recursion into the
  // first chunk.  NOTE(review): several original lines (929, 933-934, 943-945,
  // 948-951, 953, 958, 963-968, 972, 975-976 — returns, braces, #else/#endif)
  // were lost in extraction; code kept byte-identical otherwise.
  907template <
int _Index, 
int _Total, 
int _Combine, 
typename _Tp, 
typename _A0, 
typename... _As>

  908  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto  
  909  __extract_part(
const _SimdTuple<_Tp, _A0, _As...>& __x)

  915    using _Tuple = _SimdTuple<_Tp, _A0, _As...>;

  916    static_assert(_Index + _Combine <= _Total && _Index >= 0 && _Total >= 1);

  917    constexpr size_t _Np = _Tuple::_S_size();

  918    static_assert(_Np >= _Total && _Np % _Total == 0);

  919    constexpr size_t __values_per_part = _Np / _Total;

  920    [[maybe_unused]] 
constexpr size_t __values_to_skip

  921      = _Index * __values_per_part;

  922    constexpr size_t __return_size = __values_per_part * _Combine;

  923    using _RetAbi = simd_abi::deduce_t<_Tp, __return_size>;

  // fast paths: requested range aligns with the first chunk
  926    if constexpr (_Index == 0 && _Tuple::_S_first_size == __return_size)

  927      return __x.first._M_data;

  928    else if constexpr (_Index == 0 && _Total == _Combine)

  930    else if constexpr (_Index == 0 && _Tuple::_S_first_size >= __return_size)

  931      return __intrin_bitcast<__vector_type_t<_Tp, __return_size>>(

  932        __as_vector(__x.first));

  // requested range lies entirely past the first chunk: recurse into tail
  935    else if constexpr (__values_to_skip >= _Tuple::_S_first_size)

  937        if constexpr (_Tuple::_S_first_size % __values_per_part == 0)

  939            constexpr int __parts_in_first

  940              = _Tuple::_S_first_size / __values_per_part;

  941            return __extract_part<_Index - __parts_in_first,

  942                                  _Total - __parts_in_first, _Combine>(

  946          return __extract_part<__values_to_skip - _Tuple::_S_first_size,

  947                                _Np - _Tuple::_S_first_size, __return_size>(

  // requested range straddles the first-chunk boundary: gather elements
  952    else if constexpr (__return_size > _Tuple::_S_first_size - __values_to_skip)

  954#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS 
  955        const __may_alias<_Tp>* 
const element_ptr

  956          = 
reinterpret_cast<const __may_alias<_Tp>*
>(&__x) + __values_to_skip;

  957        return __as_vector(simd<_Tp, _RetAbi>(element_ptr, element_aligned));

  959        [[maybe_unused]] 
constexpr size_t __offset = __values_to_skip;

  960        return __as_vector(simd<_Tp, _RetAbi>(

  961                             [&](
auto __i) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

  962                               constexpr _SizeConstant<__i + __offset> __k;

  // requested range lies inside the first chunk: recurse into it
  969    else if constexpr (_Tuple::_S_first_size % __values_per_part == 0)

  970      return __extract_part<_Index, _Tuple::_S_first_size / __values_per_part,

  971                            _Combine>(__x.first);

  973      return __extract_part<__values_to_skip, _Tuple::_S_first_size,

  974                            _Combine * __values_per_part>(__x.first);
 
  // __fixed_size_storage_builder: recursively builds the _SimdTuple type
  // backing a fixed_size<_Np> simd by greedily appending the best native
  // ABI for the remaining element count until _Remain reaches 0.
  // NOTE(review): lines 983, 987, 990, 992, 995-996, 999 (specialization
  // arguments 0 / (_Remain > 0) and braces) were lost in extraction.
  979template <
typename _Tp, 
int _Np, 
typename _Tuple,

  980          typename _Next = simd<_Tp, _AllNativeAbis::_BestAbi<_Tp, _Np>>,

  981          int _Remain = _Np - int(_Next::size())>

  982  struct __fixed_size_storage_builder;

  // entry point: start the recursion with an empty _SimdTuple
  984template <
typename _Tp, 
int _Np>

  985  struct __fixed_size_storage
  986  : 
public __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp>> {};

  // terminal case: nothing remains, append _Next's ABI and finish
  988template <
typename _Tp, 
int _Np, 
typename... _As, 
typename _Next>

  989  struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,

  991  { 
using type = _SimdTuple<_Tp, _As..., 
typename _Next::abi_type>; };

  // recursive case: append _Next's ABI and continue with _Remain elements
  993template <
typename _Tp, 
int _Np, 
typename... _As, 
typename _Next, 
int _Remain>

  994  struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,

  997    using type = 
typename __fixed_size_storage_builder<

  998      _Tp, _Remain, _SimdTuple<_Tp, _As..., 
typename _Next::abi_type>>::type;
 
 // _SeqOp / _AbisInSimdTuple: compile-time index-sequence bookkeeping that
 // records, for a _SimdTuple, how many consecutive chunks share each ABI
 // (_Counts) and at which chunk index each ABI run begins (_Begins).
 // NOTE(review): the _SeqOp primary declaration and its _FirstPlusOne /
 // _NotFirstPlusOne members (orig lines 1004-1010, 1013) plus various
 // braces were lost in extraction; code kept byte-identical otherwise.
 1003template <
typename _Tp>

 1006template <
size_t _I0, 
size_t... _Is>

 1011    template <
size_t _First, 
size_t _Add>

 1012    using _Prepend = 
index_sequence<_First, _I0 + _Add, (_Is + _Add)...>;

 // primary template, specialized below per _SimdTuple shape
 1015template <
typename _Tp>

 1016  struct _AbisInSimdTuple;

 // empty tuple: one zero-length run
 1018template <
typename _Tp>

 1019  struct _AbisInSimdTuple<_SimdTuple<_Tp>>

 1021    using _Counts = index_sequence<0>;

 1022    using _Begins = index_sequence<0>;

 // single chunk: one run of length one starting at chunk 0
 1025template <
typename _Tp, 
typename _Ap>

 1026  struct _AbisInSimdTuple<_SimdTuple<_Tp, _Ap>>

 1028    using _Counts = index_sequence<1>;

 1029    using _Begins = index_sequence<0>;

 // two leading chunks share an ABI: extend the first run of the tail
 1032template <
typename _Tp, 
typename _A0, 
typename... _As>

 1033  struct _AbisInSimdTuple<_SimdTuple<_Tp, _A0, _A0, _As...>>

 1035    using _Counts = 
typename _SeqOp<
typename _AbisInSimdTuple<

 1036      _SimdTuple<_Tp, _A0, _As...>>::_Counts>::_FirstPlusOne;

 1037    using _Begins = 
typename _SeqOp<
typename _AbisInSimdTuple<

 1038      _SimdTuple<_Tp, _A0, _As...>>::_Begins>::_NotFirstPlusOne;

 // leading chunk's ABI differs from the next: start a new run of length 1
 1041template <
typename _Tp, 
typename _A0, 
typename _A1, 
typename... _As>

 1042  struct _AbisInSimdTuple<_SimdTuple<_Tp, _A0, _A1, _As...>>

 1044    using _Counts = 
typename _SeqOp<
typename _AbisInSimdTuple<

 1045      _SimdTuple<_Tp, _A1, _As...>>::_Counts>::template _Prepend<1, 0>;

 1046    using _Begins = 
typename _SeqOp<
typename _AbisInSimdTuple<

 1047      _SimdTuple<_Tp, _A1, _As...>>::_Begins>::template _Prepend<0, 1>;
 
 // __autocvt_to_simd: adapter wrapping a reference so it implicitly
 // converts to simd (by value, by reference, or by pointer), used to pass
 // tuple chunks to math functions taking simd out-parameters.  The
 // reference/pointer conversions require a mutable lvalue (static_asserts).
 // NOTE(review): member _M_data, several braces and member function bodies
 // (orig lines 1054-1064, 1067-1078, 1080, 1082, etc.) were lost in
 // extraction; code kept byte-identical otherwise.
 1052template <
typename _Tp, 
bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>

 1053  struct __autocvt_to_simd

 1056    using _TT = __remove_cvref_t<_Tp>;

 1065      static_assert(is_lvalue_reference<_Tp>::value, 
"");

 1066      static_assert(!is_const<_Tp>::value, 
"");

 1073      static_assert(is_lvalue_reference<_Tp>::value, 
"");

 1074      static_assert(!is_const<_Tp>::value, 
"");

 1079    __autocvt_to_simd(_Tp dd) : _M_data(dd) {}

 // convert to a simd value copying the wrapped datum
 1081    template <
typename _Abi>

 1083      operator simd<typename _TT::value_type, _Abi>()

 1084      { 
return {__private_init, _M_data}; }

 // convert to a simd reference aliasing the wrapped datum
 1086    template <
typename _Abi>

 1088      operator simd<typename _TT::value_type, _Abi>&()

 1089      { 
return *
reinterpret_cast<simd<typename _TT::value_type, _Abi>*
>(&_M_data); }

 // convert to a simd pointer aliasing the wrapped datum
 1091    template <
typename _Abi>

 1093      operator simd<typename _TT::value_type, _Abi>*()

 1094      { 
return reinterpret_cast<simd<typename _TT::value_type, _Abi>*
>(&_M_data); }

 // deduction guide: preserve reference-ness of the wrapped expression
 1097template <
typename _Tp>

 1098  __autocvt_to_simd(_Tp &&) -> __autocvt_to_simd<_Tp>;

 // arithmetic specialization: goes through a fixed_size_simd<_TT, 1>
 // proxy and writes the (possibly modified) value back on destruction
 1100template <
typename _Tp>

 1101  struct __autocvt_to_simd<_Tp, true>

 1103    using _TT = __remove_cvref_t<_Tp>;

 1105    fixed_size_simd<_TT, 1> _M_fd;

 1107    constexpr inline __autocvt_to_simd(_Tp dd) : _M_data(dd), _M_fd(_M_data) {}

 1109    ~__autocvt_to_simd()

 1110    { _M_data = __data(_M_fd).first; }

 1113    operator fixed_size_simd<_TT, 1>()

 1117    operator fixed_size_simd<_TT, 1> &()

 1119      static_assert(is_lvalue_reference<_Tp>::value, 
"");

 1120      static_assert(!is_const<_Tp>::value, 
"");

 1125    operator fixed_size_simd<_TT, 1> *()

 1127      static_assert(is_lvalue_reference<_Tp>::value, 
"");

 1128      static_assert(!is_const<_Tp>::value, 
"");
 
 // Forward declarations of the three implementation types the fixed_size
 // ABI tag wires together below.
 1135struct _CommonImplFixedSize;

 1136template <
int _Np> 
struct _SimdImplFixedSize;

 1137template <
int _Np> 
struct _MaskImplFixedSize;
 
 // simd_abi::_Fixed<_Np>: the fixed_size ABI tag.  Declares sizes,
 // validity predicates, mask sanitization helpers, the impl typedefs, and
 // the nested __traits used by simd/simd_mask for this ABI.
 // NOTE(review): the enclosing template <int _Np> header (orig 1139), many
 // braces and several member bodies (orig 1166-1169, 1194-1201, 1209-1224,
 // 1230-1253) were lost in extraction; code kept byte-identical otherwise.
 1140  struct simd_abi::_Fixed

 1142    template <
typename _Tp> 
static constexpr size_t _S_size = _Np;

 1143    template <
typename _Tp> 
static constexpr size_t _S_full_size = _Np;

 1145    struct _IsValidAbiTag : 
public __bool_constant<(_Np > 0)> {};

 1147    template <
typename _Tp>

 1148      struct _IsValidSizeFor

 1149      : __bool_constant<(_Np <= simd_abi::max_fixed_size<_Tp>)> {};

 1151    template <typename _Tp>

 1152      struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,

 1153                                    _IsValidSizeFor<_Tp>> {};

 1155    template <typename _Tp>

 1156      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;

 // mask sanitization: clear bits above _Np
 1160    _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>

 1161    _S_masked(_BitMask<_Np> __x)

 1162    { return __x._M_sanitized(); }

 1164    _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>

 1165    _S_masked(_SanitizedBitMask<_Np> __x)

 1170    using _CommonImpl = _CommonImplFixedSize;

 1171    using _SimdImpl = _SimdImplFixedSize<_Np>;

 1172    using _MaskImpl = _MaskImplFixedSize<_Np>;

 // traits: invalid unless _Tp is vectorizable and _Np is in range
 1176    template <typename _Tp, bool = _S_is_valid_v<_Tp>>

 1177      struct __traits : _InvalidTraits {};

 1179    template <typename _Tp>

 1180      struct __traits<_Tp, true>

 1182        using _IsValid = true_type;

 1183        using _SimdImpl = _SimdImplFixedSize<_Np>;

 1184        using _MaskImpl = _MaskImplFixedSize<_Np>;

 1187        using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;

 1188        using _MaskMember = _SanitizedBitMask<_Np>;

 1190        static constexpr size_t _S_simd_align

 1191          = std::__bit_ceil(_Np * sizeof(_Tp));

 1193        static constexpr size_t _S_mask_align = alignof(_MaskMember);

 1202          _SimdBase(const _SimdBase&) {}

 1204          _SimdBase() = default;

 1207          operator const _SimdMember &() const

 1208          { return static_cast<const simd<_Tp, _Fixed>*>(this)->_M_data; }

 1211          operator array<_Tp, _Np>() const

 1213            array<_Tp, _Np> __r;

 1215            static_assert(
sizeof(__r) <= 
sizeof(_SimdMember), 
"");

 1216            __builtin_memcpy(__r.data(), &
static_cast<const _SimdMember&
>(*
this),

 1225        struct _MaskBase {};

 // _SimdCastType: allows implicit conversion from array / _SimdMember
 1229        struct _SimdCastType

 1232          _SimdCastType(
const array<_Tp, _Np>&);

 1235          _SimdCastType(
const _SimdMember& dd) : _M_data(dd) {}

 1238          operator const _SimdMember &() 
const { 
return _M_data; }

 1241          const _SimdMember& _M_data;

 1248          _MaskCastType() = 
delete;
 
 // _CommonImplFixedSize: shared helpers for the fixed-size ABI.
 // _S_store copies all _Np elements of the tuple to unaligned memory via
 // memcpy (the tuple's chunks are laid out contiguously).
 // NOTE(review): closing braces (orig 1266-1267) lost in extraction.
 1257struct _CommonImplFixedSize

 1260  template <
typename _Tp, 
typename... _As>

 1261    _GLIBCXX_SIMD_INTRINSIC 
static void 
 1262    _S_store(
const _SimdTuple<_Tp, _As...>& __x, 
void* __addr)

 1264      constexpr size_t _Np = _SimdTuple<_Tp, _As...>::_S_size();

 1265      __builtin_memcpy(__addr, &__x, _Np * 
sizeof(_Tp));
 
 // _SimdImplFixedSize<_Np>: the simd implementation for fixed_size<_Np>.
 // Every operation dispatches per tuple chunk via __for_each /
 // _M_apply_per_chunk, delegating to each chunk's native ABI impl.
 // NOTE(review): this region of the extraction is missing many original
 // lines (braces, returns, macro tails); code kept byte-identical
 // otherwise.  Comments are only inserted between complete members, never
 // inside the backslash-continued macro definitions.
 1276  struct _SimdImplFixedSize

 1279    using _MaskMember = _SanitizedBitMask<_Np>;

 1281    template <
typename _Tp>

 1282      using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;

 1284    template <
typename _Tp>

 1285      static constexpr size_t _S_tuple_size = _SimdMember<_Tp>::_S_tuple_size;

 1287    template <
typename _Tp>

 1288      using _Simd = simd<_Tp, simd_abi::fixed_size<_Np>>;

 1290    template <
typename _Tp>

 1291      using _TypeTag = _Tp*;

 // broadcast: fill every chunk with __x
 1294    template <
typename _Tp>

 1295      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdMember<_Tp>

 1296      _S_broadcast(_Tp __x) 
noexcept 
 1298        return _SimdMember<_Tp>::_S_generate(

 1299                 [&](
auto __meta) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1300                   return __meta._S_broadcast(__x);

 // generator ctor: call __gen with the global element index per chunk
 1305    template <
typename _Fp, 
typename _Tp>

 1306      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdMember<_Tp>

 1307      _S_generator(_Fp&& __gen, _TypeTag<_Tp>)

 1309        return _SimdMember<_Tp>::_S_generate(

 1310                 [&__gen](
auto __meta) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1311                   return __meta._S_generator(

 1312                            [&](
auto __i) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1313                              return __i < _Np ? __gen(_SizeConstant<__meta._S_offset + __i>())

 // load: per chunk from __mem + chunk offset
 1321    template <
typename _Tp, 
typename _Up>

 1322      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdMember<_Tp>

 1323      _S_load(
const _Up* __mem, _TypeTag<_Tp>) 
noexcept 
 1325        return _SimdMember<_Tp>::_S_generate(

 1326                 [&](
auto __meta) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1327                   return __meta._S_load(&__mem[__meta._S_offset], _TypeTag<_Tp>());

 // masked load: merge loaded lanes into a copy of __old where __bits set
 1332    template <
typename _Tp, 
typename... _As, 
typename _Up>

 1333      _GLIBCXX_SIMD_INTRINSIC 
static _SimdTuple<_Tp, _As...>

 1334      _S_masked_load(
const _SimdTuple<_Tp, _As...>& __old,

 1335                     const _MaskMember __bits, 
const _Up* __mem) 
noexcept 
 1337        auto __merge = __old;

 1338        __for_each(__merge, [&](
auto __meta, 
auto& __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1339          if (__meta._S_submask(__bits).any())

 1340#pragma GCC diagnostic push

 1345#pragma GCC diagnostic ignored 
"-Warray-bounds" 
 1347              = __meta._S_masked_load(__native, __meta._S_make_mask(__bits),

 1348                                      __mem + __meta._S_offset);

 1349#pragma GCC diagnostic pop 
 // store: per chunk to __mem + chunk offset
 1355    template <
typename _Tp, 
typename _Up>

 1356      _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 1357      _S_store(
const _SimdMember<_Tp>& __v, _Up* __mem, _TypeTag<_Tp>) 
noexcept 
 1359        __for_each(__v, [&](
auto __meta, 
auto __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1360          __meta._S_store(__native, &__mem[__meta._S_offset], _TypeTag<_Tp>());

 // masked store: skip chunks whose submask is all-clear
 1365    template <
typename _Tp, 
typename... _As, 
typename _Up>

 1366      _GLIBCXX_SIMD_INTRINSIC 
static void 
 1367      _S_masked_store(
const _SimdTuple<_Tp, _As...>& __v, _Up* __mem,

 1368                      const _MaskMember __bits) 
noexcept 
 1370        __for_each(__v, [&](
auto __meta, 
auto __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1371          if (__meta._S_submask(__bits).any())

 1372#pragma GCC diagnostic push

 1377#pragma GCC diagnostic ignored 
"-Warray-bounds" 
 1378            __meta._S_masked_store(__native, __mem + __meta._S_offset,

 1379                                   __meta._S_make_mask(__bits));

 1380#pragma GCC diagnostic pop 
 // logical negation: accumulate each chunk's mask into one bitmask
 1385    template <
typename _Tp, 
typename... _As>

 1386      static constexpr inline _MaskMember

 1387      _S_negate(
const _SimdTuple<_Tp, _As...>& __x) 
noexcept 
 1389        _MaskMember __bits = 0;

 1391          __x, [&__bits](
auto __meta, 
auto __native) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1393              |= __meta._S_mask_to_shifted_ullong(__meta._S_negate(__native));

 // reduction: special-case 1- and 2-chunk tuples, otherwise pairwise
 // combine chunks (extending the shorter operand where sizes differ)
 // into a smaller fixed_size simd and recurse via reduce()
 1399    template <
typename _Tp, 
typename _BinaryOperation>

 1400      static constexpr inline _Tp _S_reduce(
const _Simd<_Tp>& __x,

 1401                                            const _BinaryOperation& __binary_op)

 1403        using _Tup = _SimdMember<_Tp>;

 1404        const _Tup& __tup = __data(__x);

 1405        if constexpr (_Tup::_S_tuple_size == 1)

 1406          return _Tup::_FirstAbi::_SimdImpl::_S_reduce(

 1407            __tup.template _M_simd_at<0>(), __binary_op);

 1408        else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 2

 1409                           && _Tup::_SecondType::_S_size() == 1)

 1411            return __binary_op(simd<_Tp, simd_abi::scalar>(

 1412                                 reduce(__tup.template _M_simd_at<0>(),

 1414                               __tup.template _M_simd_at<1>())[0];

 1416        else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 4

 1417                           && _Tup::_SecondType::_S_size() == 2)

 1420              simd<_Tp, simd_abi::scalar>(

 1421                reduce(__tup.template _M_simd_at<0>(), __binary_op)),

 1422              simd<_Tp, simd_abi::scalar>(

 1423                reduce(__tup.template _M_simd_at<1>(), __binary_op)))[0];

 1427            const auto& __x2 = __call_with_n_evaluations<

 1428              __div_roundup(_Tup::_S_tuple_size, 2)>(

 1429              [](
auto __first_simd, 
auto... __remaining) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1430                if constexpr (
sizeof...(__remaining) == 0)

 1431                  return __first_simd;

 1436                                   typename decltype(__first_simd)::abi_type,

 1437                                   typename decltype(__remaining)::abi_type...>;

 1438                    return fixed_size_simd<_Tp, _Tup2::_S_size()>(

 1440                      __make_simd_tuple(__first_simd, __remaining...));

 1443              [&](
auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1444                auto __left = __tup.template _M_simd_at<2 * __i>();

 1445                if constexpr (2 * __i + 1 == _Tup::_S_tuple_size)

 1449                    auto __right = __tup.template _M_simd_at<2 * __i + 1>();

 1450                    using _LT = 
decltype(__left);

 1451                    using _RT = 
decltype(__right);

 1452                    if constexpr (_LT::size() == _RT::size())

 1453                      return __binary_op(__left, __right);

 1456                        _GLIBCXX_SIMD_USE_CONSTEXPR_API

 1457                        typename _LT::mask_type __k(

 1459                          [](
auto __j) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1460                            return __j < _RT::size();

 1462                        _LT __ext_right = __left;

 1463                        where(__k, __ext_right)

 1464                          = __proposed::resizing_simd_cast<_LT>(__right);

 1465                        where(__k, __left) = __binary_op(__left, __ext_right);

 1470            return reduce(__x2, __binary_op);

 // element-wise min/max, per chunk
 1475    template <
typename _Tp, 
typename... _As>

 1476      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>

 1477      _S_min(
const _SimdTuple<_Tp, _As...>& __a, 
const _SimdTuple<_Tp, _As...>& __b)

 1479        return __a._M_apply_per_chunk(

 1480          [](
auto __impl, 
auto __aa, 
auto __bb) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1481            return __impl._S_min(__aa, __bb);

 1486    template <
typename _Tp, 
typename... _As>

 1487      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>

 1488      _S_max(
const _SimdTuple<_Tp, _As...>& __a, 
const _SimdTuple<_Tp, _As...>& __b)

 1490        return __a._M_apply_per_chunk(

 1491          [](
auto __impl, 
auto __aa, 
auto __bb) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1492            return __impl._S_max(__aa, __bb);

 // unary operators, per chunk
 1498    template <
typename _Tp, 
typename... _As>

 1499      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>

 1500      _S_complement(
const _SimdTuple<_Tp, _As...>& __x) 
noexcept 
 1502        return __x._M_apply_per_chunk(

 1503                 [](
auto __impl, 
auto __xx) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1504                   return __impl._S_complement(__xx);

 1509    template <
typename _Tp, 
typename... _As>

 1510      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>

 1511      _S_unary_minus(
const _SimdTuple<_Tp, _As...>& __x) 
noexcept 
 1513        return __x._M_apply_per_chunk(

 1514                 [](
auto __impl, 
auto __xx) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1515                   return __impl._S_unary_minus(__xx);

 // binary arithmetic/bitwise operators, generated per chunk
 1521#define _GLIBCXX_SIMD_FIXED_OP(name_, op_)                                                     \ 
 1522    template <typename _Tp, typename... _As>                                                   \ 
 1523      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...> name_(                  \ 
 1524        const _SimdTuple<_Tp, _As...>& __x, const _SimdTuple<_Tp, _As...>& __y)                \ 
 1526        return __x._M_apply_per_chunk(                                                         \ 
 1527          [](auto __impl, auto __xx, auto __yy) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \ 
 1528            return __impl.name_(__xx, __yy);                                                   \ 
 1533    _GLIBCXX_SIMD_FIXED_OP(_S_plus, +)

 1534    _GLIBCXX_SIMD_FIXED_OP(_S_minus, -)

 1535    _GLIBCXX_SIMD_FIXED_OP(_S_multiplies, *)

 1536    _GLIBCXX_SIMD_FIXED_OP(_S_divides, /)

 1537    _GLIBCXX_SIMD_FIXED_OP(_S_modulus, %)

 1538    _GLIBCXX_SIMD_FIXED_OP(_S_bit_and, &)

 1539    _GLIBCXX_SIMD_FIXED_OP(_S_bit_or, |)

 1540    _GLIBCXX_SIMD_FIXED_OP(_S_bit_xor, ^)

 1541    _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_left, <<)

 1542    _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_right, >>)

 1543#undef _GLIBCXX_SIMD_FIXED_OP 
 // shifts by a scalar count, per chunk
 1545    template <
typename _Tp, 
typename... _As>

 1546      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>

 1547      _S_bit_shift_left(
const _SimdTuple<_Tp, _As...>& __x, 
int __y)

 1549        return __x._M_apply_per_chunk(

 1550                 [__y](
auto __impl, 
auto __xx) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1551                   return __impl._S_bit_shift_left(__xx, __y);

 1555    template <
typename _Tp, 
typename... _As>

 1556      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>

 1557      _S_bit_shift_right(
const _SimdTuple<_Tp, _As...>& __x, 
int __y)

 1559        return __x._M_apply_per_chunk(

 1560                 [__y](
auto __impl, 
auto __xx) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1561                   return __impl._S_bit_shift_right(__xx, __y);

 // math functions, generated per chunk (dispatching on arity/return type)
 1566#define _GLIBCXX_SIMD_APPLY_ON_TUPLE(_RetTp, __name)                           \ 
 1567    template <typename _Tp, typename... _As, typename... _More>                \ 
 1568      static inline __fixed_size_storage_t<_RetTp, _Np>                        \ 
 1569        _S_##__name(const _SimdTuple<_Tp, _As...>& __x,                        \ 
 1570                    const _More&... __more)                                    \ 
 1572        if constexpr (sizeof...(_More) == 0)                                   \ 
 1574            if constexpr (is_same_v<_Tp, _RetTp>)                              \ 
 1575              return __x._M_apply_per_chunk(                                   \ 
 1576                       [](auto __impl, auto __xx)                              \ 
 1577                         constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA          \ 
 1579                         using _V = typename decltype(__impl)::simd_type;      \ 
 1580                         return __data(__name(_V(__private_init, __xx)));      \ 
 1583              return __optimize_simd_tuple(                                    \ 
 1584                       __x.template _M_apply_r<_RetTp>(                        \ 
 1585                         [](auto __impl, auto __xx)                            \ 
 1586                           _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA                  \ 
 1587                         { return __impl._S_##__name(__xx); }));               \ 
 1589        else if constexpr (                                                    \ 
 1592            _RetTp> && (... && is_same_v<_SimdTuple<_Tp, _As...>, _More>) )    \ 
 1593          return __x._M_apply_per_chunk(                                       \ 
 1594                   [](auto __impl, auto __xx, auto... __pack)                  \ 
 1595                     constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA              \ 
 1597                     using _V = typename decltype(__impl)::simd_type;          \ 
 1598                     return __data(__name(_V(__private_init, __xx),            \ 
 1599                                          _V(__private_init, __pack)...));     \ 
 1601        else if constexpr (is_same_v<_Tp, _RetTp>)                             \ 
 1602          return __x._M_apply_per_chunk(                                       \ 
 1603                   [](auto __impl, auto __xx, auto... __pack)                  \ 
 1604                     constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA              \ 
 1606                     using _V = typename decltype(__impl)::simd_type;          \ 
 1607                     return __data(__name(_V(__private_init, __xx),            \ 
 1608                                          __autocvt_to_simd(__pack)...));      \ 
 1611          __assert_unreachable<_Tp>();                                         \ 
 1614    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acos)

 1615    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asin)

 1616    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan)

 1617    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan2)

 1618    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cos)

 1619    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sin)

 1620    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tan)

 1621    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acosh)

 1622    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asinh)

 1623    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atanh)

 1624    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cosh)

 1625    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sinh)

 1626    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tanh)

 1627    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp)

 1628    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp2)

 1629    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, expm1)

 1630    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
int, ilogb)

 1631    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log)

 1632    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log10)

 1633    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log1p)

 1634    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log2)

 1635    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, logb)

 1637    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp,

 1639    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, scalbln)

 1640    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cbrt)

 1641    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, abs)

 1642    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fabs)

 1643    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, pow)

 1644    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sqrt)

 1645    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erf)

 1646    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erfc)

 1647    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, lgamma)

 1648    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tgamma)

 1649    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, trunc)

 1650    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ceil)

 1651    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, floor)

 1652    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nearbyint)

 1654    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, rint)

 1655    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
long, lrint)

 1656    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
long long, llrint)

 1658    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, round)

 1659    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
long, lround)

 1660    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
long long, llround)

 1662    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ldexp)

 1663    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmod)

 1664    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, remainder)

 1666    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nextafter)

 1667    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fdim)

 1668    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmax)

 1669    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmin)

 1670    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fma)

 1671    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
int, fpclassify)

 1672#undef _GLIBCXX_SIMD_APPLY_ON_TUPLE 
 // remquo/frexp need an int-tuple out-parameter per chunk
 1674    template <
typename _Tp, 
typename... _Abis>

 1675      static inline _SimdTuple<_Tp, _Abis...>

 1676      _S_remquo(
const _SimdTuple<_Tp, _Abis...>& __x, 
const _SimdTuple<_Tp, _Abis...>& __y,

 1677                __fixed_size_storage_t<
int, _SimdTuple<_Tp, _Abis...>::_S_size()>* __z)

 1679        return __x._M_apply_per_chunk(

 1680                 [](
auto __impl, 
const auto __xx, 
const auto __yy, 
auto& __zz)

 1681                   _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA

 1682                 { 
return __impl._S_remquo(__xx, __yy, &__zz); },

 1686    template <
typename _Tp, 
typename... _As>

 1687      static inline _SimdTuple<_Tp, _As...>

 1688      _S_frexp(
const _SimdTuple<_Tp, _As...>& __x,

 1689               __fixed_size_storage_t<int, _Np>& __exp) 
noexcept 
 1691        return __x._M_apply_per_chunk(

 1692                 [](
auto __impl, 
const auto& __a, 
auto& __b) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1693                   return __data(frexp(
typename decltype(__impl)::simd_type(__private_init, __a),

 1694                                       __autocvt_to_simd(__b)));

 // classification predicates returning a bitmask, generated per chunk
 1698#define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_)                                              \ 
 1699    template <typename _Tp, typename... _As>                                             \ 
 1700      static inline _MaskMember                                                          \ 
 1701      _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept                            \ 
 1703        return _M_test([] (auto __impl, auto __xx) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA  { \ 
 1704                 return __impl._S_##name_(__xx);                                         \ 
 1708    _GLIBCXX_SIMD_TEST_ON_TUPLE_(isinf)

 1709    _GLIBCXX_SIMD_TEST_ON_TUPLE_(isfinite)

 1710    _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnan)

 1711    _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnormal)

 1712    _GLIBCXX_SIMD_TEST_ON_TUPLE_(signbit)

 1713#undef _GLIBCXX_SIMD_TEST_ON_TUPLE_ 
 // increment/decrement, per chunk in place
 1716    template <
typename... _Ts>

 1717      _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 1718      _S_increment(_SimdTuple<_Ts...>& __x)

 1721          __x, [](
auto __meta, 
auto& native) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1722            __meta._S_increment(native);

 1726    template <
typename... _Ts>

 1727      _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 1728      _S_decrement(_SimdTuple<_Ts...>& __x)

 1731          __x, [](
auto __meta, 
auto& native) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1732            __meta._S_decrement(native);

 // comparisons returning a bitmask, generated per chunk
 1737#define _GLIBCXX_SIMD_CMP_OPERATIONS(__cmp)                                    \ 
 1738    template <typename _Tp, typename... _As>                                   \ 
 1739      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember                     \ 
 1740      __cmp(const _SimdTuple<_Tp, _As...>& __x,                                \ 
 1741            const _SimdTuple<_Tp, _As...>& __y)                                \ 
 1743        return _M_test([](auto __impl, auto __xx, auto __yy)                   \ 
 1744                         constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA          \ 
 1745                       { return __impl.__cmp(__xx, __yy); },                   \ 
 1749    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_equal_to)

 1750    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_not_equal_to)

 1751    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less)

 1752    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less_equal)

 1753    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isless)

 1754    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessequal)

 1755    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreater)

 1756    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreaterequal)

 1757    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessgreater)

 1758    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isunordered)

 1759#undef _GLIBCXX_SIMD_CMP_OPERATIONS 
 // single-element write-through
 1762    template <
typename _Tp, 
typename... _As, 
typename _Up>

 1763      _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 1764      _S_set(_SimdTuple<_Tp, _As...>& __v, 
int __i, _Up&& __x) 
noexcept 
 1765      { __v._M_set(__i, 
static_cast<_Up&&
>(__x)); }

 // masked assignment: tuple rhs and broadcast-scalar rhs overloads
 1768    template <
typename _Tp, 
typename... _As>

 1769      _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 1770      _S_masked_assign(
const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,

 1771                       const __type_identity_t<_SimdTuple<_Tp, _As...>>& __rhs)

 1773        __for_each(__lhs, __rhs,

 1774                   [&](
auto __meta, 
auto& __native_lhs, 
auto __native_rhs)

 1775                     constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA

 1777                     __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,

 1784    template <
typename _Tp, 
typename... _As>

 1785      _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 1786      _S_masked_assign(
const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,

 1787                       const __type_identity_t<_Tp> __rhs)

 1790          __lhs, [&](
auto __meta, 
auto& __native_lhs) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1791            __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,

 // masked compound assignment (op=), tuple and scalar rhs overloads
 1797    template <
typename _Op, 
typename _Tp, 
typename... _As>

 1798      static constexpr inline void 
 1799      _S_masked_cassign(
const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,

 1800                        const _SimdTuple<_Tp, _As...>& __rhs, _Op __op)

 1802        __for_each(__lhs, __rhs,

 1803                   [&](
auto __meta, 
auto& __native_lhs, 
auto __native_rhs)

 1804                     constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA

 1806                     __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),

 1807                                                       __native_lhs, __native_rhs, __op);

 1813    template <
typename _Op, 
typename _Tp, 
typename... _As>

 1814      static constexpr inline void 
 1815      _S_masked_cassign(
const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,

 1816                        const _Tp& __rhs, _Op __op)

 1819          __lhs, [&](
auto __meta, 
auto& __native_lhs) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {

 1820            __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),

 1821                                              __native_lhs, __rhs, __op);

 // masked unary op (e.g. where(...)++), per chunk
 1826    template <
template <
typename> 
class _Op, 
typename _Tp, 
typename... _As>

 1827      static constexpr inline _SimdTuple<_Tp, _As...>

 1828      _S_masked_unary(
const _MaskMember __bits,

 1829                      const _SimdTuple<_Tp, _As...> __v) 

 1831        return __v._M_apply_wrapped([&__bits](
auto __meta,

 1832                                              auto __native) 
constexpr {

 1833          return __meta.template _S_masked_unary<_Op>(__meta._S_make_mask(

 1844  struct _MaskImplFixedSize
 
 1847      sizeof(_ULLong) * __CHAR_BIT__ >= _Np,
 
 1848      "The fixed_size implementation relies on one _ULLong being able to store " 
 1849      "all boolean elements."); 
 
 1852    using _Abi = simd_abi::fixed_size<_Np>;
 
 1854    using _MaskMember = _SanitizedBitMask<_Np>;
 
 1856    template <
typename _Tp>
 
 1857      using _FirstAbi = 
typename __fixed_size_storage_t<_Tp, _Np>::_FirstAbi;
 
 1859    template <
typename _Tp>
 
 1860      using _TypeTag = _Tp*;
 
    // Broadcast a single bool to all _Np mask bits: all-ones bitmask for
    // true, all-zeros for false.
    _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
    _S_broadcast(bool __x)
    { return __x ? ~_MaskMember() : _MaskMember(); }
 
 1872      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
 1873      _S_load(
const bool* __mem)
 
 1875        if (__builtin_is_constant_evaluated())
 
 1878            for (
size_t __i = 0; __i < _Np; ++__i)
 
 1879              __r.set(__i, __mem[__i]);
 
 1882        using _Ip = __int_for_sizeof_t<bool>;
 
 1886        const simd<_Ip, _Abi> __bools(
reinterpret_cast<const __may_alias<_Ip>*
>(
 
 1889        return __data(__bools != 0);
 
 1894    template <
bool _Sanitized>
 
 1895      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SanitizedBitMask<_Np>
 
 1896      _S_to_bits(_BitMask<_Np, _Sanitized> __x)
 
 1898        if constexpr (_Sanitized)
 
 1901          return __x._M_sanitized();
 
 1906    template <
typename _Tp, 
typename _Up, 
typename _UAbi>
 
 1907      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
 1908      _S_convert(simd_mask<_Up, _UAbi> __x)
 
 1910        return _UAbi::_MaskImpl::_S_to_bits(__data(__x))
 
 1911          .template _M_extract<0, _Np>();
 
 1916    template <
typename _Tp>
 
 1917      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
 1918      _S_from_bitmask(_MaskMember __bits, _TypeTag<_Tp>) 
noexcept 
 1922    static constexpr inline _MaskMember
 
 1923    _S_load(
const bool* __mem) 
noexcept 
 1928      using _Vs = __fixed_size_storage_t<_UChar, _Np>;
 
 1929      __for_each(_Vs{}, [&](
auto __meta, 
auto) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1930        __r |= __meta._S_mask_to_shifted_ullong(
 
 1931          __meta._S_mask_impl._S_load(&__mem[__meta._S_offset],
 
 1932                                      _SizeConstant<__meta._S_size()>()));
 
 1938    static constexpr inline _MaskMember
 
 1939    _S_masked_load(_MaskMember __merge, _MaskMember __mask, 
const bool* __mem) 
noexcept 
 1941      _BitOps::_S_bit_iteration(__mask.to_ullong(),
 
 1942                                [&](
auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1943                                  __merge.set(__i, __mem[__i]);
 
 1949    static constexpr inline void 
 1950    _S_store(
const _MaskMember __bitmask, 
bool* __mem) 
noexcept 
 1952      if constexpr (_Np == 1)
 
 1953        __mem[0] = __bitmask[0];
 
 1955        _FirstAbi<_UChar>::_CommonImpl::_S_store_bool_array(__bitmask, __mem);
 
 1959    static constexpr inline void 
 1960    _S_masked_store(
const _MaskMember __v, 
bool* __mem, 
const _MaskMember __k) 
noexcept 
 1962      _BitOps::_S_bit_iteration(
 
 1963        __k, [&](
auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { __mem[__i] = __v[__i]; });
 
    // Logical AND of two masks — identical to bitwise AND on the bitmask
    // representation.
    _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
    _S_logical_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x & __y; }

    // Logical OR of two masks — identical to bitwise OR on the bitmask
    // representation.
    _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
    _S_logical_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x | __y; }
 
 1975    _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
 1976    _S_bit_not(
const _MaskMember& __x) 
noexcept 
    // Bitwise AND of two masks.
    _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
    _S_bit_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x & __y; }

    // Bitwise OR of two masks.
    _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
    _S_bit_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x | __y; }

    // Bitwise XOR of two masks.
    _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
    _S_bit_xor(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x ^ __y; }
 
    // smart_reference access: set bit __i of the mask __k to __x.
    _GLIBCXX_SIMD_INTRINSIC static constexpr void
    _S_set(_MaskMember& __k, int __i, bool __x) noexcept
    { __k.set(__i, __x); }
 
    // Masked assignment of a mask: bits of __lhs selected by __k are replaced
    // by the corresponding bits of __rhs (blend via AND/OR).
    _GLIBCXX_SIMD_INTRINSIC static constexpr void
    _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs, const _MaskMember __rhs)
    { __lhs = (__lhs & ~__k) | (__rhs & __k); }
 
 2002    _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 2003    _S_masked_assign(
const _MaskMember __k, _MaskMember& __lhs, 
const bool __rhs)
 
    // Reduction: true iff every mask bit is set.
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr bool
      _S_all_of(simd_mask<_Tp, _Abi> __k)
      { return __data(__k).all(); }

    // Reduction: true iff at least one mask bit is set.
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr bool
      _S_any_of(simd_mask<_Tp, _Abi> __k)
      { return __data(__k).any(); }

    // Reduction: true iff no mask bit is set.
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr bool
      _S_none_of(simd_mask<_Tp, _Abi> __k)
      { return __data(__k).none(); }
 
 2034    template <
typename _Tp>
 
 2035      _GLIBCXX_SIMD_INTRINSIC 
static constexpr bool 
 2036      _S_some_of([[maybe_unused]] simd_mask<_Tp, _Abi> __k)
 
 2038        if constexpr (_Np == 1)
 
 2041          return __data(__k).any() && !__data(__k).all();
 
    // Reduction: number of set mask bits.
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr int
      _S_popcount(simd_mask<_Tp, _Abi> __k)
      { return __data(__k).count(); }
 
    // Index of the lowest set bit (count of trailing zeros of the bitmask).
    // Precondition (not checked here): at least one bit is set, otherwise
    // the result is _Np-independent garbage (countr_zero of 0).
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr int
      _S_find_first_set(simd_mask<_Tp, _Abi> __k)
      { return std::__countr_zero(__data(__k).to_ullong()); }
 
    // Index of the highest set bit: bit_width(x) - 1.  Yields -1 when no bit
    // is set (bit_width(0) == 0); callers are expected to pass a non-empty
    // mask.
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr int
      _S_find_last_set(simd_mask<_Tp, _Abi> __k)
      { return std::__bit_width(__data(__k).to_ullong()) - 1; }
 
 2069_GLIBCXX_SIMD_END_NAMESPACE
 
// NOTE: the following lines are documentation-generator cross-reference
// residue, preserved as comments:
// constexpr _Tp reduce(_InputIterator __first, _InputIterator __last, _Tp __init, _BinaryOperation __binary_op)
//   Calculate reduction of values in a range.
// constexpr auto size(const _Container& __cont) noexcept(noexcept(__cont.size())) -> decltype(__cont.size())
//   Return the size of a container.
// integer_sequence<size_t, _Idx...> index_sequence
//   Alias template index_sequence.
// constexpr _Iterator __base(_Iterator __it)