39#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_ 
   40#define _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_ 
   42#if __cplusplus >= 201703L 
   46_GLIBCXX_SIMD_BEGIN_NAMESPACE
 
   49template <
size_t _I, 
typename _Tp>
 
   50  struct __simd_tuple_element;
 
   52template <
typename _Tp, 
typename _A0, 
typename... _As>
 
   53  struct __simd_tuple_element<0, _SimdTuple<_Tp, _A0, _As...>>
 
   54  { 
using type = simd<_Tp, _A0>; };
 
   56template <
size_t _I, 
typename _Tp, 
typename _A0, 
typename... _As>
 
   57  struct __simd_tuple_element<_I, _SimdTuple<_Tp, _A0, _As...>>
 
   58  { 
using type = 
typename __simd_tuple_element<_I - 1, _SimdTuple<_Tp, _As...>>::type; };
 
   60template <
size_t _I, 
typename _Tp>
 
   61  using __simd_tuple_element_t = 
typename __simd_tuple_element<_I, _Tp>::type;
 
   66template <
typename _Tp, 
typename... _A0s, 
typename... _A1s>
 
   67  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp, _A0s..., _A1s...>
 
   68  __simd_tuple_concat(
const _SimdTuple<_Tp, _A0s...>& __left,
 
   69                      const _SimdTuple<_Tp, _A1s...>& __right)
 
   71    if constexpr (
sizeof...(_A0s) == 0)
 
   73    else if constexpr (
sizeof...(_A1s) == 0)
 
   76      return {__left.first, __simd_tuple_concat(__left.second, __right)};
 
   79template <
typename _Tp, 
typename _A10, 
typename... _A1s>
 
   80  _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple<_Tp, simd_abi::scalar, _A10, _A1s...>
 
   81  __simd_tuple_concat(
const _Tp& __left, 
const _SimdTuple<_Tp, _A10, _A1s...>& __right)
 
   82  { 
return {__left, __right}; }
 
   88template <
size_t _Np, 
typename _Tp>
 
   89  _GLIBCXX_SIMD_INTRINSIC 
constexpr decltype(
auto)
 
   90  __simd_tuple_pop_front(_Tp&& __x)
 
   92    if constexpr (_Np == 0)
 
   93      return static_cast<_Tp&&
>(__x);
 
   96        using _Up = __remove_cvref_t<_Tp>;
 
   97        static_assert(_Np >= _Up::_S_first_size);
 
   98        return __simd_tuple_pop_front<_Np - _Up::_S_first_size>(__x.second);
 
  106struct __as_simd_tuple {};
 
  108template <
typename _Tp, 
typename _A0, 
typename... _Abis>
 
  109  _GLIBCXX_SIMD_INTRINSIC 
constexpr simd<_Tp, _A0>
 
  110  __simd_tuple_get_impl(__as_simd, 
const _SimdTuple<_Tp, _A0, _Abis...>& __t, _SizeConstant<0>)
 
  111  { 
return {__private_init, __t.first}; }
 
  113template <
typename _Tp, 
typename _A0, 
typename... _Abis>
 
  114  _GLIBCXX_SIMD_INTRINSIC 
constexpr const auto&
 
  115  __simd_tuple_get_impl(__as_simd_tuple, 
const _SimdTuple<_Tp, _A0, _Abis...>& __t,
 
  117  { 
return __t.first; }
 
  119template <
typename _Tp, 
typename _A0, 
typename... _Abis>
 
  120  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  121  __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _A0, _Abis...>& __t, _SizeConstant<0>)
 
  122  { 
return __t.first; }
 
  124template <
typename _R, 
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  125  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  126  __simd_tuple_get_impl(_R, 
const _SimdTuple<_Tp, _Abis...>& __t, _SizeConstant<_Np>)
 
  127  { 
return __simd_tuple_get_impl(_R(), __t.second, _SizeConstant<_Np - 1>()); }
 
  129template <
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  130  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  131  __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _Abis...>& __t, _SizeConstant<_Np>)
 
  132  { 
return __simd_tuple_get_impl(__as_simd_tuple(), __t.second, _SizeConstant<_Np - 1>()); }
 
  134template <
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  135  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  136  __get_simd_at(
const _SimdTuple<_Tp, _Abis...>& __t)
 
  137  { 
return __simd_tuple_get_impl(__as_simd(), __t, _SizeConstant<_Np>()); }
 
  141template <
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  142  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  143  __get_tuple_at(
const _SimdTuple<_Tp, _Abis...>& __t)
 
  144  { 
return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>()); }
 
  146template <
size_t _Np, 
typename _Tp, 
typename... _Abis>
 
  147  _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  148  __get_tuple_at(_SimdTuple<_Tp, _Abis...>& __t)
 
  149  { 
return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>()); }
 
  152template <
typename _Tp, 
typename _Abi, 
size_t _Offset>
 
  153  struct __tuple_element_meta : 
public _Abi::_SimdImpl
 
  155    static_assert(is_same_v<
typename _Abi::_SimdImpl::abi_type,
 
  158    using value_type = _Tp;
 
  159    using abi_type = _Abi;
 
  160    using _Traits = _SimdTraits<_Tp, _Abi>;
 
  161    using _MaskImpl = 
typename _Abi::_MaskImpl;
 
  162    using _MaskMember = 
typename _Traits::_MaskMember;
 
  163    using simd_type = simd<_Tp, _Abi>;
 
  164    static constexpr size_t _S_offset = _Offset;
 
  165    static constexpr size_t _S_size() { 
return simd_size<_Tp, _Abi>::value; }
 
  166    static constexpr _MaskImpl _S_mask_impl = {};
 
  168    template <
size_t _Np, 
bool _Sanitized>
 
  169      _GLIBCXX_SIMD_INTRINSIC 
static auto 
  170      _S_submask(_BitMask<_Np, _Sanitized> __bits)
 
  171      { 
return __bits.template _M_extract<_Offset, _S_size()>(); }
 
  173    template <
size_t _Np, 
bool _Sanitized>
 
  174      _GLIBCXX_SIMD_INTRINSIC 
static _MaskMember
 
  175      _S_make_mask(_BitMask<_Np, _Sanitized> __bits)
 
  177        return _MaskImpl::template _S_convert<_Tp>(
 
  178          __bits.template _M_extract<_Offset, _S_size()>()._M_sanitized());
 
  181    _GLIBCXX_SIMD_INTRINSIC 
static _ULLong
 
  182    _S_mask_to_shifted_ullong(_MaskMember __k)
 
  183    { 
return _MaskImpl::_S_to_bits(__k).to_ullong() << _Offset; }
 
  186template <
size_t _Offset, 
typename _Tp, 
typename _Abi, 
typename... _As>
 
  187  _GLIBCXX_SIMD_INTRINSIC
 
  188  __tuple_element_meta<_Tp, _Abi, _Offset>
 
  189  __make_meta(
const _SimdTuple<_Tp, _Abi, _As...>&)
 
  194template <
size_t _Offset, 
typename _Base>
 
  195  struct _WithOffset : 
public _Base
 
  197    static inline constexpr size_t _S_offset = _Offset;
 
  199    _GLIBCXX_SIMD_INTRINSIC 
char*
 
  201    { 
return reinterpret_cast<char*
>(
this) + _S_offset * 
sizeof(
typename _Base::value_type); }
 
  203    _GLIBCXX_SIMD_INTRINSIC 
const char*
 
  204    _M_as_charptr()
 const 
  205    { 
return reinterpret_cast<const char*
>(
this) + _S_offset * 
sizeof(
typename _Base::value_type); }
 
  209template <
size_t _O0, 
size_t _O1, 
typename _Base>
 
  210  struct _WithOffset<_O0, _WithOffset<_O1, _Base>> {};
 
  212template <
size_t _Offset, 
typename _Tp>
 
  213  _GLIBCXX_SIMD_INTRINSIC
 
  215  __add_offset(_Tp& __base)
 
  216  { 
return static_cast<_WithOffset<_Offset, __remove_cvref_t<_Tp>
>&>(
__base); }
 
  218template <
size_t _Offset, 
typename _Tp>
 
  219  _GLIBCXX_SIMD_INTRINSIC
 
  221  __add_offset(
const _Tp& __base)
 
  222  { 
return static_cast<const _WithOffset<_Offset, __remove_cvref_t<_Tp>
>&>(
__base); }
 
  224template <
size_t _Offset, 
size_t _ExistingOffset, 
typename _Tp>
 
  225  _GLIBCXX_SIMD_INTRINSIC
 
  227  __add_offset(_WithOffset<_ExistingOffset, _Tp>& __base)
 
  228  { 
return static_cast<_WithOffset<_Offset + _ExistingOffset, _Tp>&
>(
static_cast<_Tp&
>(
__base)); }
 
  230template <
size_t _Offset, 
size_t _ExistingOffset, 
typename _Tp>
 
  231  _GLIBCXX_SIMD_INTRINSIC
 
  233  __add_offset(
const _WithOffset<_ExistingOffset, _Tp>& __base)
 
  235    return static_cast<const _WithOffset<_Offset + _ExistingOffset, _Tp>&
>(
 
  236      static_cast<const _Tp&
>(
__base));
 
  239template <
typename _Tp>
 
  240  constexpr inline size_t __offset = 0;
 
  242template <
size_t _Offset, 
typename _Tp>
 
  243  constexpr inline size_t __offset<_WithOffset<_Offset, _Tp>>
 
  244    = _WithOffset<_Offset, _Tp>::_S_offset;
 
  246template <
typename _Tp>
 
  247  constexpr inline size_t __offset<const _Tp> = __offset<_Tp>;
 
  249template <
typename _Tp>
 
  250  constexpr inline size_t __offset<_Tp&> = __offset<_Tp>;
 
  252template <
typename _Tp>
 
  253  constexpr inline size_t __offset<_Tp&&> = __offset<_Tp>;
 
  258template <
typename _Tp>
 
  259  struct _SimdTuple<_Tp>
 
  261    using value_type = _Tp;
 
  262    static constexpr size_t _S_tuple_size = 0;
 
  263    static constexpr size_t _S_size() { 
return 0; }
 
  267template <
typename _FirstType, 
typename _SecondType>
 
  268  struct _SimdTupleData
 
  273    _GLIBCXX_SIMD_INTRINSIC
 
  275    _M_is_constprop()
 const 
  277      if constexpr (is_class_v<_FirstType>)
 
  278        return first._M_is_constprop() && second._M_is_constprop();
 
  280        return __builtin_constant_p(first) && second._M_is_constprop();
 
  284template <
typename _FirstType, 
typename _Tp>
 
  285  struct _SimdTupleData<_FirstType, _SimdTuple<_Tp>>
 
  288    static constexpr _SimdTuple<_Tp> second = {};
 
  290    _GLIBCXX_SIMD_INTRINSIC
 
  292    _M_is_constprop()
 const 
  294      if constexpr (is_class_v<_FirstType>)
 
  295        return first._M_is_constprop();
 
  297        return __builtin_constant_p(first);
 
  302template <
typename _Tp, 
typename _Abi0, 
typename... _Abis>
 
  303  struct _SimdTuple<_Tp, _Abi0, _Abis...>
 
  304    : _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
 
  305                     _SimdTuple<_Tp, _Abis...>>
 
  307    static_assert(!__is_fixed_size_abi_v<_Abi0>);
 
  308    using value_type = _Tp;
 
  309    using _FirstType = 
typename _SimdTraits<_Tp, _Abi0>::_SimdMember;
 
  310    using _FirstAbi = _Abi0;
 
  311    using _SecondType = _SimdTuple<_Tp, _Abis...>;
 
  312    static constexpr size_t _S_tuple_size = 
sizeof...(_Abis) + 1;
 
  314    static constexpr size_t _S_size()
 
  315    { 
return simd_size_v<_Tp, _Abi0> + _SecondType::_S_size(); }
 
  317    static constexpr size_t _S_first_size = simd_size_v<_Tp, _Abi0>;
 
  318    static constexpr bool _S_is_homogeneous = (is_same_v<_Abi0, _Abis> && ...);
 
  320    using _Base = _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
 
  321                                 _SimdTuple<_Tp, _Abis...>>;
 
  325    _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple() = 
default;
 
  326    _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple(
const _SimdTuple&) = 
default;
 
  327    _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple& operator=(
const _SimdTuple&)
 
  330    template <
typename _Up>
 
  331      _GLIBCXX_SIMD_INTRINSIC 
constexpr 
  332      _SimdTuple(_Up&& __x)
 
  333      : _Base{static_cast<_Up&&>(__x)} {}
 
  335    template <
typename _Up, 
typename _Up2>
 
  336      _GLIBCXX_SIMD_INTRINSIC 
constexpr 
  337      _SimdTuple(_Up&& __x, _Up2&& __y)
 
  338      : _Base{static_cast<_Up&&>(__x), static_cast<_Up2&&>(__y)} {}
 
  340    template <
typename _Up>
 
  341      _GLIBCXX_SIMD_INTRINSIC 
constexpr 
  342      _SimdTuple(_Up&& __x, _SimdTuple<_Tp>)
 
  343      : _Base{static_cast<_Up&&>(__x)} {}
 
  345    _GLIBCXX_SIMD_INTRINSIC 
char*
 
  347    { 
return reinterpret_cast<char*
>(
this); }
 
  349    _GLIBCXX_SIMD_INTRINSIC 
const char*
 
  350    _M_as_charptr()
 const 
  351    { 
return reinterpret_cast<const char*
>(
this); }
 
  353    template <
size_t _Np>
 
  354      _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  357        if constexpr (_Np == 0)
 
  360          return second.template _M_at<_Np - 1>();
 
  363    template <
size_t _Np>
 
  364      _GLIBCXX_SIMD_INTRINSIC 
constexpr const auto&
 
  367        if constexpr (_Np == 0)
 
  370          return second.template _M_at<_Np - 1>();
 
  373    template <
size_t _Np>
 
  374      _GLIBCXX_SIMD_INTRINSIC 
constexpr auto 
  377        if constexpr (_Np == 0)
 
  378          return simd<_Tp, _Abi0>(__private_init, first);
 
  380          return second.template _M_simd_at<_Np - 1>();
 
  383    template <
size_t _Offset = 0, 
typename _Fp>
 
  384      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple
 
  385      _S_generate(_Fp&& __gen, _SizeConstant<_Offset> = {})
 
  387        auto&& __first = __gen(__tuple_element_meta<_Tp, _Abi0, _Offset>());
 
  388        if constexpr (_S_tuple_size == 1)
 
  392                  _SecondType::_S_generate(
 
  393                    static_cast<_Fp&&
>(__gen),
 
  394                    _SizeConstant<_Offset + simd_size_v<_Tp, _Abi0>>())};
 
  397    template <
size_t _Offset = 0, 
typename _Fp, 
typename... _More>
 
  398      _GLIBCXX_SIMD_INTRINSIC _SimdTuple
 
  399      _M_apply_wrapped(_Fp&& __fun, 
const _More&... __more)
 const 
  402          = __fun(__make_meta<_Offset>(*
this), first, __more.first...);
 
  403        if constexpr (_S_tuple_size == 1)
 
  408            second.template _M_apply_wrapped<_Offset + simd_size_v<_Tp, _Abi0>>(
 
  409              static_cast<_Fp&&
>(__fun), __more.second...)};
 
  412    template <
typename _Tup>
 
  413      _GLIBCXX_SIMD_INTRINSIC 
constexpr decltype(
auto)
 
  414      _M_extract_argument(_Tup&& __tup)
 const 
  416        using _TupT = 
typename __remove_cvref_t<_Tup>::value_type;
 
  417        if constexpr (is_same_v<_SimdTuple, __remove_cvref_t<_Tup>>)
 
  419        else if (__builtin_is_constant_evaluated())
 
  420          return __fixed_size_storage_t<_TupT, _S_first_size>::_S_generate(
 
  421                   [&](
auto __meta) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  422                     return __meta._S_generator(
 
  423                              [&](
auto __i) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  425                              }, 
static_cast<_TupT*
>(
nullptr));
 
  429            __fixed_size_storage_t<_TupT, _S_first_size> __r;
 
  430            __builtin_memcpy(__r._M_as_charptr(), __tup._M_as_charptr(),
 
  436    template <
typename _Tup>
 
  437      _GLIBCXX_SIMD_INTRINSIC 
constexpr auto&
 
  438      _M_skip_argument(_Tup&& __tup)
 const 
  440        static_assert(_S_tuple_size > 1);
 
  441        using _Up = __remove_cvref_t<_Tup>;
 
  442        constexpr size_t __off = __offset<_Up>;
 
  443        if constexpr (_S_first_size == _Up::_S_first_size && __off == 0)
 
  445        else if constexpr (_S_first_size > _Up::_S_first_size
 
  446                           && _S_first_size % _Up::_S_first_size == 0
 
  448          return __simd_tuple_pop_front<_S_first_size>(__tup);
 
  449        else if constexpr (_S_first_size + __off < _Up::_S_first_size)
 
  450          return __add_offset<_S_first_size>(__tup);
 
  451        else if constexpr (_S_first_size + __off == _Up::_S_first_size)
 
  454          __assert_unreachable<_Tup>();
 
  457    template <
size_t _Offset, 
typename... _More>
 
  458      _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  459      _M_assign_front(
const _SimdTuple<_Tp, _Abi0, _More...>& __x) &
 
  461        static_assert(_Offset == 0);
 
  463        if constexpr (
sizeof...(_More) > 0)
 
  465            static_assert(
sizeof...(_Abis) >= 
sizeof...(_More));
 
  466            second.template _M_assign_front<0>(__x.second);
 
  470    template <
size_t _Offset>
 
  471      _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  472      _M_assign_front(
const _FirstType& __x) &
 
  474        static_assert(_Offset == 0);
 
  478    template <
size_t _Offset, 
typename... _As>
 
  479      _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  480      _M_assign_front(
const _SimdTuple<_Tp, _As...>& __x) &
 
  482        __builtin_memcpy(_M_as_charptr() + _Offset * 
sizeof(value_type),
 
  484                         sizeof(_Tp) * _SimdTuple<_Tp, _As...>::_S_size());
 
  492    template <
typename _Fp, 
typename... _More>
 
  493      _GLIBCXX_SIMD_INTRINSIC 
constexpr _SimdTuple
 
  494      _M_apply_per_chunk(_Fp&& __fun, _More&&... __more)
 const 
  498                         is_lvalue_reference<_More>,
 
  499                         negation<is_const<remove_reference_t<_More>>>>) )
 
  502            auto&& __first = [&](
auto... __args) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  503              auto __r = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
 
  505              [[maybe_unused]] 
auto&& __ignore_me = {(
 
  506                [](
auto&& __dst, 
const auto& __src) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  507                  if constexpr (is_assignable_v<
decltype(__dst),
 
  510                      __dst.template _M_assign_front<__offset<
decltype(__dst)>>(
 
  513                }(
static_cast<_More&&
>(__more), __args),
 
  516            }(_M_extract_argument(__more)...);
 
  517            if constexpr (_S_tuple_size == 1)
 
  521                      second._M_apply_per_chunk(
static_cast<_Fp&&
>(__fun),
 
  522                                                _M_skip_argument(__more)...)};
 
  526            auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
 
  527                                   _M_extract_argument(__more)...);
 
  528            if constexpr (_S_tuple_size == 1)
 
  532                      second._M_apply_per_chunk(
static_cast<_Fp&&
>(__fun),
 
  533                                                _M_skip_argument(__more)...)};
 
  537    template <
typename _R = _Tp, 
typename _Fp, 
typename... _More>
 
  538      _GLIBCXX_SIMD_INTRINSIC 
auto 
  539      _M_apply_r(_Fp&& __fun, 
const _More&... __more)
 const 
  541        auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
 
  543        if constexpr (_S_tuple_size == 1)
 
  546          return __simd_tuple_concat<_R>(
 
  547            __first, second.template _M_apply_r<_R>(
static_cast<_Fp&&
>(__fun),
 
  551    template <
typename _Fp, 
typename... _More>
 
  552      _GLIBCXX_SIMD_INTRINSIC 
constexpr friend _SanitizedBitMask<_S_size()>
 
  553      _M_test(
const _Fp& __fun, 
const _SimdTuple& __x, 
const _More&... __more)
 
  555        const _SanitizedBitMask<_S_first_size> __first
 
  556          = _Abi0::_MaskImpl::_S_to_bits(
 
  557            __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), __x.first,
 
  559        if constexpr (_S_tuple_size == 1)
 
  562          return _M_test(__fun, __x.second, __more.second...)
 
  563            ._M_prepend(__first);
 
  566    template <
typename _Up, _Up _I>
 
  567      _GLIBCXX_SIMD_INTRINSIC 
constexpr _Tp
 
  568      operator[](integral_constant<_Up, _I>) 
const noexcept 
  570        if constexpr (_I < simd_size_v<_Tp, _Abi0>)
 
  571          return _M_subscript_read(_I);
 
  573          return second[integral_constant<_Up, _I - simd_size_v<_Tp, _Abi0>>()];
 
  576    _GLIBCXX_SIMD_INTRINSIC _Tp
 
  577    operator[](
size_t __i) 
const noexcept 
  579      if constexpr (_S_tuple_size == 1)
 
  580        return _M_subscript_read(__i);
 
  583#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS 
  584          return reinterpret_cast<const __may_alias<_Tp>*
>(
this)[__i];
 
  586          if constexpr (__is_scalar_abi<_Abi0>())
 
  588              const _Tp* ptr = &first;
 
  592            return __i < simd_size_v<_Tp, _Abi0>
 
  593                     ? _M_subscript_read(__i)
 
  594                     : second[__i - simd_size_v<_Tp, _Abi0>];
 
  599    _GLIBCXX_SIMD_INTRINSIC 
void 
  600    _M_set(
size_t __i, _Tp __val) 
noexcept 
  602      if constexpr (_S_tuple_size == 1)
 
  603        return _M_subscript_write(__i, __val);
 
  606#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS 
  607          reinterpret_cast<__may_alias<_Tp>*
>(
this)[__i] = __val;
 
  609          if (__i < simd_size_v<_Tp, _Abi0>)
 
  610            _M_subscript_write(__i, __val);
 
  612            second._M_set(__i - simd_size_v<_Tp, _Abi0>, __val);
 
  619    _GLIBCXX_SIMD_INTRINSIC _Tp
 
  620    _M_subscript_read([[maybe_unused]] 
size_t __i) 
const noexcept 
  622      if constexpr (__is_vectorizable_v<_FirstType>)
 
  628    _GLIBCXX_SIMD_INTRINSIC 
void 
  629    _M_subscript_write([[maybe_unused]] 
size_t __i, _Tp __y) 
noexcept 
  631      if constexpr (__is_vectorizable_v<_FirstType>)
 
  634        first._M_set(__i, __y);
 
  641template <
typename _Tp, 
typename _A0>
 
  642  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
 
  643  __make_simd_tuple(simd<_Tp, _A0> __x0)
 
  644  { 
return {__data(__x0)}; }
 
  646template <
typename _Tp, 
typename _A0, 
typename... _As>
 
  647  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _As...>
 
  648  __make_simd_tuple(
const simd<_Tp, _A0>& __x0, 
const simd<_Tp, _As>&... __xs)
 
  649  { 
return {__data(__x0), __make_simd_tuple(__xs...)}; }
 
  651template <
typename _Tp, 
typename _A0>
 
  652  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
 
  653  __make_simd_tuple(
const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0)
 
  656template <
typename _Tp, 
typename _A0, 
typename _A1, 
typename... _Abis>
 
  657  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _A1, _Abis...>
 
  659    const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0,
 
  660    const typename _SimdTraits<_Tp, _A1>::_SimdMember& __arg1,
 
  661    const typename _SimdTraits<_Tp, _Abis>::_SimdMember&... __args)
 
  662  { 
return {__arg0, __make_simd_tuple<_Tp, _A1, _Abis...>(__arg1, __args...)}; }
 
  665template <
typename _Tp, 
size_t _Np, 
typename _V, 
size_t _NV, 
typename... _VX>
 
  666  _GLIBCXX_SIMD_INTRINSIC 
constexpr __fixed_size_storage_t<_Tp, _Np>
 
  667  __to_simd_tuple(
const array<_V, _NV>& __from, 
const _VX... __fromX);
 
  669template <
typename _Tp, 
size_t _Np,
 
  671          typename _R = __fixed_size_storage_t<_Tp, _Np>, 
typename _V0,
 
  672          typename _V0VT = _VectorTraits<_V0>, 
typename... _VX>
 
  673  _GLIBCXX_SIMD_INTRINSIC _R 
constexpr __to_simd_tuple(
const _V0 __from0, 
const _VX... __fromX)
 
  675    static_assert(is_same_v<typename _V0VT::value_type, _Tp>);
 
  676    static_assert(_Offset < _V0VT::_S_full_size);
 
  677    using _R0 = __vector_type_t<_Tp, _R::_S_first_size>;
 
  678    if constexpr (_R::_S_tuple_size == 1)
 
  680        if constexpr (_Np == 1)
 
  681          return _R{__from0[_Offset]};
 
  682        else if constexpr (_Offset == 0 && _V0VT::_S_full_size >= _Np)
 
  683          return _R{__intrin_bitcast<_R0>(__from0)};
 
  684        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
 
  685                           && _V0VT::_S_full_size / 2 >= _Np)
 
  686          return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0))};
 
  687        else if constexpr (_Offset * 4 == _V0VT::_S_full_size
 
  688                           && _V0VT::_S_full_size / 4 >= _Np)
 
  689          return _R{__intrin_bitcast<_R0>(__extract_part<1, 4>(__from0))};
 
  691          __assert_unreachable<_Tp>();
 
  695        if constexpr (1 == _R::_S_first_size)
 
  697            if constexpr (_Offset + 1 < _V0VT::_S_full_size)
 
  698              return _R{__from0[_Offset],
 
  699                        __to_simd_tuple<_Tp, _Np - 1, _Offset + 1>(__from0,
 
  702              return _R{__from0[_Offset],
 
  703                        __to_simd_tuple<_Tp, _Np - 1, 0>(__fromX...)};
 
  707        else if constexpr (_V0VT::_S_full_size == _R::_S_first_size
 
  710                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size>(__fromX...)};
 
  713        else if constexpr (_V0VT::_S_full_size > _R::_S_first_size
 
  715          return _R{__intrin_bitcast<_R0>(__from0),
 
  716                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
 
  717                                    _R::_S_first_size>(__from0, __fromX...)};
 
  721        else if constexpr (_Offset * 4 == _V0VT::_S_full_size
 
  722                           && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
 
  723          return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
 
  724                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
 
  725                                    _Offset + _R::_S_first_size>(__from0,
 
  730        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
 
  731                           && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
 
  732          return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
 
  733                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
 
  734                                    _Offset + _R::_S_first_size>(__from0,
 
  738        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
 
  739                           && _V0VT::_S_full_size / 2 >= _R::_S_first_size)
 
  740          return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0)),
 
  741                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size, 0>(
 
  746          __assert_unreachable<_Tp>();
 
  750template <
typename _Tp, 
size_t _Np, 
typename _V, 
size_t _NV, 
typename... _VX>
 
  751  _GLIBCXX_SIMD_INTRINSIC 
constexpr __fixed_size_storage_t<_Tp, _Np>
 
  752  __to_simd_tuple(
const array<_V, _NV>& __from, 
const _VX... __fromX)
 
  754    if constexpr (is_same_v<_Tp, _V>)
 
  758          "An array of scalars must be the last argument to __to_simd_tuple");
 
  759        return __call_with_subscripts(
 
  760                 __from, make_index_sequence<_NV>(),
 
  761                 [&](
const auto... __args) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  762                   return __simd_tuple_concat(
 
  763                            _SimdTuple<_Tp, simd_abi::scalar>{__args}..., _SimdTuple<_Tp>());
 
  767      return __call_with_subscripts(
 
  768               __from, make_index_sequence<_NV>(),
 
  769               [&](
const auto... __args) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  770                 return __to_simd_tuple<_Tp, _Np>(__args..., __fromX...);
 
  774template <
size_t, 
typename _Tp>
 
  775  using __to_tuple_helper = _Tp;
 
  777template <
typename _Tp, 
typename _A0, 
size_t _NOut, 
size_t _Np,
 
  779  _GLIBCXX_SIMD_INTRINSIC __fixed_size_storage_t<_Tp, _NOut>
 
  780  __to_simd_tuple_impl(index_sequence<_Indexes...>,
 
  781      const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
 
  783    return __make_simd_tuple<_Tp, __to_tuple_helper<_Indexes, _A0>...>(
 
  784      __args[_Indexes]...);
 
  787template <
typename _Tp, 
typename _A0, 
size_t _NOut, 
size_t _Np,
 
  788          typename _R = __fixed_size_storage_t<_Tp, _NOut>>
 
  789  _GLIBCXX_SIMD_INTRINSIC _R
 
  790  __to_simd_tuple_sized(
 
  791    const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
 
  793    static_assert(_Np * simd_size_v<_Tp, _A0> >= _NOut);
 
  794    return __to_simd_tuple_impl<_Tp, _A0, _NOut>(
 
  795      make_index_sequence<_R::_S_tuple_size>(), __args);
 
  799template <
typename _Tp>
 
  800  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp>
 
  801  __optimize_simd_tuple(
const _SimdTuple<_Tp>)
 
  804template <
typename _Tp, 
typename _Ap>
 
  805  _GLIBCXX_SIMD_INTRINSIC 
const _SimdTuple<_Tp, _Ap>&
 
  806  __optimize_simd_tuple(
const _SimdTuple<_Tp, _Ap>& __x)
 
  809template <
typename _Tp, 
typename _A0, 
typename _A1, 
typename... _Abis,
 
  810          typename _R = __fixed_size_storage_t<
 
  811            _Tp, _SimdTuple<_Tp, _A0, _A1, _Abis...>::_S_size()>>
 
  812  _GLIBCXX_SIMD_INTRINSIC _R
 
  813  __optimize_simd_tuple(
const _SimdTuple<_Tp, _A0, _A1, _Abis...>& __x)
 
  815    using _Tup = _SimdTuple<_Tp, _A0, _A1, _Abis...>;
 
  816    if constexpr (is_same_v<_R, _Tup>)
 
  818    else if constexpr (is_same_v<
typename _R::_FirstType,
 
  819                                 typename _Tup::_FirstType>)
 
  820      return {__x.first, __optimize_simd_tuple(__x.second)};
 
  821    else if constexpr (__is_scalar_abi<_A0>()
 
  822                       || _A0::template _S_is_partial<_Tp>)
 
  823      return {__generate_from_n_evaluations<_R::_S_first_size,
 
  824                                            typename _R::_FirstType>(
 
  825                [&](
auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { 
return __x[__i]; }),
 
  826              __optimize_simd_tuple(
 
  827                __simd_tuple_pop_front<_R::_S_first_size>(__x))};
 
  828    else if constexpr (is_same_v<_A0, _A1>
 
  829        && _R::_S_first_size == simd_size_v<_Tp, _A0> + simd_size_v<_Tp, _A1>)
 
  830      return {__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
 
  831              __optimize_simd_tuple(__x.second.second)};
 
  832    else if constexpr (
sizeof...(_Abis) >= 2
 
  833        && _R::_S_first_size == (4 * simd_size_v<_Tp, _A0>)
 
  834        && simd_size_v<_Tp, _A0> == __simd_tuple_element_t<
 
  835            (
sizeof...(_Abis) >= 2 ? 3 : 0), _Tup>::size())
 
  837        __concat(__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
 
  838                 __concat(__x.template _M_at<2>(), __x.template _M_at<3>())),
 
  839        __optimize_simd_tuple(__x.second.second.second.second)};
 
  842        static_assert(
sizeof(_R) == 
sizeof(__x));
 
  844        __builtin_memcpy(__r._M_as_charptr(), __x._M_as_charptr(),
 
  845                         sizeof(_Tp) * _R::_S_size());
 
  851template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _Fp>
 
  852  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  853  __for_each(
const _SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
 
  854  { 
static_cast<_Fp&&
>(__fun)(__make_meta<_Offset>(__t), __t.first); }
 
  856template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _A1,
 
  857          typename... _As, 
typename _Fp>
 
  858  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  859  __for_each(
const _SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
 
  861    __fun(__make_meta<_Offset>(__t), __t.first);
 
  862    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
 
  863                                                     static_cast<_Fp&&
>(__fun));
 
  867template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _Fp>
 
  868  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  869  __for_each(_SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
 
  870  { 
static_cast<_Fp&&
>(__fun)(__make_meta<_Offset>(__t), __t.first); }
 
  872template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _A1,
 
  873          typename... _As, 
typename _Fp>
 
  874  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  875  __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
 
  877    __fun(__make_meta<_Offset>(__t), __t.first);
 
  878    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
 
  879                                                     static_cast<_Fp&&
>(__fun));
 
  883template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _Fp>
 
  884  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  885  __for_each(_SimdTuple<_Tp, _A0>& __a, 
const _SimdTuple<_Tp, _A0>& __b, _Fp&& __fun)
 
  886  { 
static_cast<_Fp&&
>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first); }
 
  888template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _A1,
 
  889          typename... _As, 
typename _Fp>
 
  890  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  891  __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __a,
 
  892             const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
 
  894    __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
 
  895    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
 
  896                                                     static_cast<_Fp&&
>(__fun));
 
  900template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _Fp>
 
  901  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  902  __for_each(
const _SimdTuple<_Tp, _A0>& __a, 
const _SimdTuple<_Tp, _A0>& __b, _Fp&& __fun)
 
  903  { 
static_cast<_Fp&&
>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first); }
 
  905template <
size_t _Offset = 0, 
typename _Tp, 
typename _A0, 
typename _A1,
 
  906          typename... _As, 
typename _Fp>
 
  907  _GLIBCXX_SIMD_INTRINSIC 
constexpr void 
  908  __for_each(
const _SimdTuple<_Tp, _A0, _A1, _As...>& __a,
 
  909             const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
 
  911    __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
 
  912    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
 
  913                                                     static_cast<_Fp&&
>(__fun));
 
  918template <
int _Index, 
int _Total, 
int _Combine, 
typename _Tp, 
typename _A0, 
typename... _As>
 
  919  _GLIBCXX_SIMD_INTRINSIC 
auto  
  920  __extract_part(
const _SimdTuple<_Tp, _A0, _As...>& __x)
 
  926    using _Tuple = _SimdTuple<_Tp, _A0, _As...>;
 
  927    static_assert(_Index + _Combine <= _Total && _Index >= 0 && _Total >= 1);
 
  928    constexpr size_t _Np = _Tuple::_S_size();
 
  929    static_assert(_Np >= _Total && _Np % _Total == 0);
 
  930    constexpr size_t __values_per_part = _Np / _Total;
 
  931    [[maybe_unused]] 
constexpr size_t __values_to_skip
 
  932      = _Index * __values_per_part;
 
  933    constexpr size_t __return_size = __values_per_part * _Combine;
 
  934    using _RetAbi = simd_abi::deduce_t<_Tp, __return_size>;
 
  937    if constexpr (_Index == 0 && _Tuple::_S_first_size == __return_size)
 
  938      return __x.first._M_data;
 
  939    else if constexpr (_Index == 0 && _Total == _Combine)
 
  941    else if constexpr (_Index == 0 && _Tuple::_S_first_size >= __return_size)
 
  942      return __intrin_bitcast<__vector_type_t<_Tp, __return_size>>(
 
  943        __as_vector(__x.first));
 
  946    else if constexpr (__values_to_skip >= _Tuple::_S_first_size)
 
  948        if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
 
  950            constexpr int __parts_in_first
 
  951              = _Tuple::_S_first_size / __values_per_part;
 
  952            return __extract_part<_Index - __parts_in_first,
 
  953                                  _Total - __parts_in_first, _Combine>(
 
  957          return __extract_part<__values_to_skip - _Tuple::_S_first_size,
 
  958                                _Np - _Tuple::_S_first_size, __return_size>(
 
  963    else if constexpr (__return_size > _Tuple::_S_first_size - __values_to_skip)
 
  965#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS 
  966        const __may_alias<_Tp>* 
const element_ptr
 
  967          = 
reinterpret_cast<const __may_alias<_Tp>*
>(&__x) + __values_to_skip;
 
  968        return __as_vector(simd<_Tp, _RetAbi>(element_ptr, element_aligned));
 
  970        [[maybe_unused]] 
constexpr size_t __offset = __values_to_skip;
 
  971        return __as_vector(simd<_Tp, _RetAbi>(
 
  972                             [&](
auto __i) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
  973                               constexpr _SizeConstant<__i + __offset> __k;
 
  980    else if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
 
  981      return __extract_part<_Index, _Tuple::_S_first_size / __values_per_part,
 
  982                            _Combine>(__x.first);
 
  984      return __extract_part<__values_to_skip, _Tuple::_S_first_size,
 
  985                            _Combine * __values_per_part>(__x.first);
 
  990template <
typename _Tp, 
int _Np, 
typename _Tuple,
 
  991          typename _Next = simd<_Tp, _AllNativeAbis::_BestAbi<_Tp, _Np>>,
 
  992          int _Remain = _Np - int(_Next::size())>
 
  993  struct __fixed_size_storage_builder;
 
  995template <
typename _Tp, 
int _Np>
 
  996  struct __fixed_size_storage
 
  997  : 
public __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp>> {};
 
  999template <
typename _Tp, 
int _Np, 
typename... _As, 
typename _Next>
 
 1000  struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
 
 1002  { 
using type = _SimdTuple<_Tp, _As..., 
typename _Next::abi_type>; };
 
 1004template <
typename _Tp, 
int _Np, 
typename... _As, 
typename _Next, 
int _Remain>
 
 1005  struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
 
 1008    using type = 
typename __fixed_size_storage_builder<
 
 1009      _Tp, _Remain, _SimdTuple<_Tp, _As..., 
typename _Next::abi_type>>::type;
 
 1014template <
typename _Tp, 
bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>
 
 1015  struct __autocvt_to_simd
 
 1018    using _TT = __remove_cvref_t<_Tp>;
 
 1020    _GLIBCXX_SIMD_INTRINSIC
 
 1024    _GLIBCXX_SIMD_INTRINSIC
 
 1027      static_assert(is_lvalue_reference<_Tp>::value, 
"");
 
 1028      static_assert(!is_const<_Tp>::value, 
"");
 
 1032    _GLIBCXX_SIMD_INTRINSIC
 
 1035      static_assert(is_lvalue_reference<_Tp>::value, 
"");
 
 1036      static_assert(!is_const<_Tp>::value, 
"");
 
 1040    _GLIBCXX_SIMD_INTRINSIC 
constexpr 
 1041    __autocvt_to_simd(_Tp dd) : _M_data(dd) {}
 
 1043    template <
typename _Abi>
 
 1044      _GLIBCXX_SIMD_INTRINSIC
 
 1045      operator simd<typename _TT::value_type, _Abi>()
 
 1046      { 
return {__private_init, _M_data}; }
 
 1048    template <
typename _Abi>
 
 1049      _GLIBCXX_SIMD_INTRINSIC
 
 1050      operator simd<typename _TT::value_type, _Abi>&()
 
 1051      { 
return *
reinterpret_cast<simd<typename _TT::value_type, _Abi>*
>(&_M_data); }
 
 1053    template <
typename _Abi>
 
 1054      _GLIBCXX_SIMD_INTRINSIC
 
 1055      operator simd<typename _TT::value_type, _Abi>*()
 
 1056      { 
return reinterpret_cast<simd<typename _TT::value_type, _Abi>*
>(&_M_data); }
 
 1059template <
typename _Tp>
 
 1060  __autocvt_to_simd(_Tp &&) -> __autocvt_to_simd<_Tp>;
 
 1062template <
typename _Tp>
 
 1063  struct __autocvt_to_simd<_Tp, true>
 
 1065    using _TT = __remove_cvref_t<_Tp>;
 
 1067    fixed_size_simd<_TT, 1> _M_fd;
 
 1069    _GLIBCXX_SIMD_INTRINSIC
 
 1070    constexpr __autocvt_to_simd(_Tp dd) : _M_data(dd), _M_fd(_M_data) {}
 
 1072    _GLIBCXX_SIMD_INTRINSIC
 
 1073    ~__autocvt_to_simd()
 
 1074    { _M_data = __data(_M_fd).first; }
 
 1076    _GLIBCXX_SIMD_INTRINSIC
 
 1077    operator fixed_size_simd<_TT, 1>()
 
 1080    _GLIBCXX_SIMD_INTRINSIC
 
 1081    operator fixed_size_simd<_TT, 1> &()
 
 1083      static_assert(is_lvalue_reference<_Tp>::value, 
"");
 
 1084      static_assert(!is_const<_Tp>::value, 
"");
 
 1088    _GLIBCXX_SIMD_INTRINSIC
 
 1089    operator fixed_size_simd<_TT, 1> *()
 
 1091      static_assert(is_lvalue_reference<_Tp>::value, 
"");
 
 1092      static_assert(!is_const<_Tp>::value, 
"");
 
 1099struct _CommonImplFixedSize;
 
 1100template <
int _Np, 
typename = __detail::__odr_helper> 
struct _SimdImplFixedSize;
 
 1101template <
int _Np, 
typename = __detail::__odr_helper> 
struct _MaskImplFixedSize;
 
 1104  struct simd_abi::_Fixed
 
 1106    template <
typename _Tp> 
static constexpr size_t _S_size = _Np;
 
 1107    template <
typename _Tp> 
static constexpr size_t _S_full_size = _Np;
 
 1109    struct _IsValidAbiTag : 
public __bool_constant<(_Np > 0)> {};
 
 1111    template <
typename _Tp>
 
 1112      struct _IsValidSizeFor
 
 1113      : __bool_constant<(_Np <= simd_abi::max_fixed_size<_Tp>)> {};
 
 1115    template <typename _Tp>
 
 1116      struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,
 
 1117                                    _IsValidSizeFor<_Tp>> {};
 
 1119    template <typename _Tp>
 
 1120      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;
 
 1124    _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
 
 1125    _S_masked(_BitMask<_Np> __x)
 
 1126    { return __x._M_sanitized(); }
 
 1128    _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
 
 1129    _S_masked(_SanitizedBitMask<_Np> __x)
 
 1134    using _CommonImpl = _CommonImplFixedSize;
 
 1135    using _SimdImpl = _SimdImplFixedSize<_Np>;
 
 1136    using _MaskImpl = _MaskImplFixedSize<_Np>;
 
 1140    template <typename _Tp, bool = _S_is_valid_v<_Tp>>
 
 1141      struct __traits : _InvalidTraits {};
 
 1143    template <typename _Tp>
 
 1144      struct __traits<_Tp, true>
 
 1146        using _IsValid = true_type;
 
 1147        using _SimdImpl = _SimdImplFixedSize<_Np>;
 
 1148        using _MaskImpl = _MaskImplFixedSize<_Np>;
 
 1151        using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
 
 1152        using _MaskMember = _SanitizedBitMask<_Np>;
 
 1154        static constexpr size_t _S_simd_align
 
 1155          = std::__bit_ceil(_Np * sizeof(_Tp));
 
 1157        static constexpr size_t _S_mask_align = alignof(_MaskMember);
 
 1165          _GLIBCXX_SIMD_ALWAYS_INLINE
 
 1166          _SimdBase(const _SimdBase&) {}
 
 1167          _SimdBase() = default;
 
 1169          _GLIBCXX_SIMD_ALWAYS_INLINE explicit
 
 1170          operator const _SimdMember &() const
 
 1171          { return static_cast<const simd<_Tp, _Fixed>*>(this)->_M_data; }
 
 1173          _GLIBCXX_SIMD_ALWAYS_INLINE explicit
 
 1174          operator array<_Tp, _Np>() const
 
 1176            array<_Tp, _Np> __r;
 
 1178            static_assert(
sizeof(__r) <= 
sizeof(_SimdMember), 
"");
 
 1179            __builtin_memcpy(__r.data(), &
static_cast<const _SimdMember&
>(*
this),
 
 1188        struct _MaskBase {};
 
 1192        struct _SimdCastType
 
 1194          _GLIBCXX_SIMD_ALWAYS_INLINE
 
 1195          _SimdCastType(
const array<_Tp, _Np>&);
 
 1197          _GLIBCXX_SIMD_ALWAYS_INLINE
 
 1198          _SimdCastType(
const _SimdMember& dd) : _M_data(dd) {}
 
 1200          _GLIBCXX_SIMD_ALWAYS_INLINE 
explicit 
 1201          operator const _SimdMember &() 
const { 
return _M_data; }
 
 1204          const _SimdMember& _M_data;
 
 1211          _MaskCastType() = 
delete;
 
 1220struct _CommonImplFixedSize
 
 1223  template <
typename _Tp, 
typename... _As>
 
 1224    _GLIBCXX_SIMD_INTRINSIC 
static void 
 1225    _S_store(
const _SimdTuple<_Tp, _As...>& __x, 
void* __addr)
 
 1227      constexpr size_t _Np = _SimdTuple<_Tp, _As...>::_S_size();
 
 1228      __builtin_memcpy(__addr, &__x, _Np * 
sizeof(_Tp));
 
 1238template <
int _Np, 
typename>
 
 1239  struct _SimdImplFixedSize
 
 1242    using _MaskMember = _SanitizedBitMask<_Np>;
 
 1244    template <
typename _Tp>
 
 1245      using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
 
 1247    template <
typename _Tp>
 
 1248      static constexpr size_t _S_tuple_size = _SimdMember<_Tp>::_S_tuple_size;
 
 1250    template <
typename _Tp>
 
 1251      using _Simd = simd<_Tp, simd_abi::fixed_size<_Np>>;
 
 1253    template <
typename _Tp>
 
 1254      using _TypeTag = _Tp*;
 
 1257    template <
typename _Tp>
 
 1258      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdMember<_Tp>
 
 1259      _S_broadcast(_Tp __x) 
noexcept 
 1261        return _SimdMember<_Tp>::_S_generate(
 
 1262                 [&](
auto __meta) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1263                   return __meta._S_broadcast(__x);
 
 1268    template <
typename _Fp, 
typename _Tp>
 
 1269      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdMember<_Tp>
 
 1270      _S_generator(_Fp&& __gen, _TypeTag<_Tp>)
 
 1272        return _SimdMember<_Tp>::_S_generate(
 
 1273                 [&__gen](
auto __meta) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1274                   return __meta._S_generator(
 
 1275                            [&](
auto __i) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1276                              return __i < _Np ? __gen(_SizeConstant<__meta._S_offset + __i>())
 
 1284    template <
typename _Tp, 
typename _Up>
 
 1285      _GLIBCXX_SIMD_INTRINSIC 
static _SimdMember<_Tp>
 
 1286      _S_load(
const _Up* __mem, _TypeTag<_Tp>) 
noexcept 
 1288        return _SimdMember<_Tp>::_S_generate(
 
 1289                 [&](
auto __meta) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1290                   return __meta._S_load(&__mem[__meta._S_offset], _TypeTag<_Tp>());
 
 1295    template <
typename _Tp, 
typename... _As, 
typename _Up>
 
 1296      _GLIBCXX_SIMD_INTRINSIC 
static _SimdTuple<_Tp, _As...>
 
 1297      _S_masked_load(
const _SimdTuple<_Tp, _As...>& __old,
 
 1298                     const _MaskMember __bits, 
const _Up* __mem) 
noexcept 
 1300        auto __merge = __old;
 
 1301        __for_each(__merge, [&](
auto __meta, 
auto& __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1302          if (__meta._S_submask(__bits).any())
 
 1303#pragma GCC diagnostic push
 
 1308#pragma GCC diagnostic ignored 
"-Warray-bounds" 
 1310              = __meta._S_masked_load(__native, __meta._S_make_mask(__bits),
 
 1311                                      __mem + __meta._S_offset);
 
 1312#pragma GCC diagnostic pop
 
 1318    template <
typename _Tp, 
typename _Up>
 
 1319      _GLIBCXX_SIMD_INTRINSIC 
static void 
 1320      _S_store(
const _SimdMember<_Tp>& __v, _Up* __mem, _TypeTag<_Tp>) 
noexcept 
 1322        __for_each(__v, [&](
auto __meta, 
auto __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1323          __meta._S_store(__native, &__mem[__meta._S_offset], _TypeTag<_Tp>());
 
 1328    template <
typename _Tp, 
typename... _As, 
typename _Up>
 
 1329      _GLIBCXX_SIMD_INTRINSIC 
static void 
 1330      _S_masked_store(
const _SimdTuple<_Tp, _As...>& __v, _Up* __mem,
 
 1331                      const _MaskMember __bits) 
noexcept 
 1333        __for_each(__v, [&](
auto __meta, 
auto __native) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1334          if (__meta._S_submask(__bits).any())
 
 1335#pragma GCC diagnostic push
 
 1340#pragma GCC diagnostic ignored 
"-Warray-bounds" 
 1341            __meta._S_masked_store(__native, __mem + __meta._S_offset,
 
 1342                                   __meta._S_make_mask(__bits));
 
 1343#pragma GCC diagnostic pop
 
 1348    template <
typename _Tp, 
typename... _As>
 
 1349      static inline _MaskMember
 
 1350      _S_negate(
const _SimdTuple<_Tp, _As...>& __x) 
noexcept 
 1352        _MaskMember __bits = 0;
 
 1354          __x, [&__bits](
auto __meta, 
auto __native) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1356              |= __meta._S_mask_to_shifted_ullong(__meta._S_negate(__native));
 
 1362    template <
typename _Tp, 
typename _BinaryOperation>
 
 1363      static constexpr inline _Tp _S_reduce(
const _Simd<_Tp>& __x,
 
 1364                                            const _BinaryOperation& __binary_op)
 
 1366        using _Tup = _SimdMember<_Tp>;
 
 1367        const _Tup& __tup = __data(__x);
 
 1368        if constexpr (_Tup::_S_tuple_size == 1)
 
 1369          return _Tup::_FirstAbi::_SimdImpl::_S_reduce(
 
 1370            __tup.template _M_simd_at<0>(), __binary_op);
 
 1371        else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 2
 
 1372                           && _Tup::_SecondType::_S_size() == 1)
 
 1374            return __binary_op(simd<_Tp, simd_abi::scalar>(
 
 1375                                 reduce(__tup.template _M_simd_at<0>(),
 
 1377                               __tup.template _M_simd_at<1>())[0];
 
 1379        else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 4
 
 1380                           && _Tup::_SecondType::_S_size() == 2)
 
 1383              simd<_Tp, simd_abi::scalar>(
 
 1384                reduce(__tup.template _M_simd_at<0>(), __binary_op)),
 
 1385              simd<_Tp, simd_abi::scalar>(
 
 1386                reduce(__tup.template _M_simd_at<1>(), __binary_op)))[0];
 
 1390            const auto& __x2 = __call_with_n_evaluations<
 
 1391              __div_roundup(_Tup::_S_tuple_size, 2)>(
 
 1392              [](
auto __first_simd, 
auto... __remaining) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1393                if constexpr (
sizeof...(__remaining) == 0)
 
 1394                  return __first_simd;
 
 1399                                   typename decltype(__first_simd)::abi_type,
 
 1400                                   typename decltype(__remaining)::abi_type...>;
 
 1401                    return fixed_size_simd<_Tp, _Tup2::_S_size()>(
 
 1403                      __make_simd_tuple(__first_simd, __remaining...));
 
 1406              [&](
auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1407                auto __left = __tup.template _M_simd_at<2 * __i>();
 
 1408                if constexpr (2 * __i + 1 == _Tup::_S_tuple_size)
 
 1412                    auto __right = __tup.template _M_simd_at<2 * __i + 1>();
 
 1413                    using _LT = 
decltype(__left);
 
 1414                    using _RT = 
decltype(__right);
 
 1415                    if constexpr (_LT::size() == _RT::size())
 
 1416                      return __binary_op(__left, __right);
 
 1419                        _GLIBCXX_SIMD_USE_CONSTEXPR_API
 
 1420                        typename _LT::mask_type __k(
 
 1422                          [](
auto __j) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1423                            return __j < _RT::size();
 
 1425                        _LT __ext_right = __left;
 
 1426                        where(__k, __ext_right)
 
 1427                          = __proposed::resizing_simd_cast<_LT>(__right);
 
 1428                        where(__k, __left) = __binary_op(__left, __ext_right);
 
 1433            return reduce(__x2, __binary_op);
 
 1438    template <
typename _Tp, 
typename... _As>
 
 1439      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>
 
 1440      _S_min(
const _SimdTuple<_Tp, _As...>& __a, 
const _SimdTuple<_Tp, _As...>& __b)
 
 1442        return __a._M_apply_per_chunk(
 
 1443          [](
auto __impl, 
auto __aa, 
auto __bb) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1444            return __impl._S_min(__aa, __bb);
 
 1449    template <
typename _Tp, 
typename... _As>
 
 1450      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>
 
 1451      _S_max(
const _SimdTuple<_Tp, _As...>& __a, 
const _SimdTuple<_Tp, _As...>& __b)
 
 1453        return __a._M_apply_per_chunk(
 
 1454          [](
auto __impl, 
auto __aa, 
auto __bb) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1455            return __impl._S_max(__aa, __bb);
 
 1461    template <
typename _Tp, 
typename... _As>
 
 1462      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>
 
 1463      _S_complement(
const _SimdTuple<_Tp, _As...>& __x) 
noexcept 
 1465        return __x._M_apply_per_chunk(
 
 1466                 [](
auto __impl, 
auto __xx) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1467                   return __impl._S_complement(__xx);
 
 1472    template <
typename _Tp, 
typename... _As>
 
 1473      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>
 
 1474      _S_unary_minus(
const _SimdTuple<_Tp, _As...>& __x) 
noexcept 
 1476        return __x._M_apply_per_chunk(
 
 1477                 [](
auto __impl, 
auto __xx) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1478                   return __impl._S_unary_minus(__xx);
 
 1484#define _GLIBCXX_SIMD_FIXED_OP(name_, op_)                                                     \ 
 1485    template <typename _Tp, typename... _As>                                                   \ 
 1486      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple<_Tp, _As...> name_(                  \ 
 1487        const _SimdTuple<_Tp, _As...>& __x, const _SimdTuple<_Tp, _As...>& __y)                \ 
 1489        return __x._M_apply_per_chunk(                                                         \ 
 1490          [](auto __impl, auto __xx, auto __yy) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { \ 
 1491            return __impl.name_(__xx, __yy);                                                   \ 
 1496    _GLIBCXX_SIMD_FIXED_OP(_S_plus, +)
 
 1497    _GLIBCXX_SIMD_FIXED_OP(_S_minus, -)
 
 1498    _GLIBCXX_SIMD_FIXED_OP(_S_multiplies, *)
 
 1499    _GLIBCXX_SIMD_FIXED_OP(_S_divides, /)
 
 1500    _GLIBCXX_SIMD_FIXED_OP(_S_modulus, %)
 
 1501    _GLIBCXX_SIMD_FIXED_OP(_S_bit_and, &)
 
 1502    _GLIBCXX_SIMD_FIXED_OP(_S_bit_or, |)
 
 1503    _GLIBCXX_SIMD_FIXED_OP(_S_bit_xor, ^)
 
 1504    _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_left, <<)
 
 1505    _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_right, >>)
 
 1506#undef _GLIBCXX_SIMD_FIXED_OP 
 1508    template <
typename _Tp, 
typename... _As>
 
 1509      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>
 
 1510      _S_bit_shift_left(
const _SimdTuple<_Tp, _As...>& __x, 
int __y)
 
 1512        return __x._M_apply_per_chunk(
 
 1513                 [__y](
auto __impl, 
auto __xx) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1514                   return __impl._S_bit_shift_left(__xx, __y);
 
 1518    template <
typename _Tp, 
typename... _As>
 
 1519      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SimdTuple<_Tp, _As...>
 
 1520      _S_bit_shift_right(
const _SimdTuple<_Tp, _As...>& __x, 
int __y)
 
 1522        return __x._M_apply_per_chunk(
 
 1523                 [__y](
auto __impl, 
auto __xx) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1524                   return __impl._S_bit_shift_right(__xx, __y);
 
 1529#define _GLIBCXX_SIMD_APPLY_ON_TUPLE(_RetTp, __name)                           \ 
 1530    template <typename _Tp, typename... _As, typename... _More>                \ 
 1531      static inline __fixed_size_storage_t<_RetTp, _Np>                        \ 
 1532        _S_##__name(const _SimdTuple<_Tp, _As...>& __x,                        \ 
 1533                    const _More&... __more)                                    \ 
 1535        if constexpr (sizeof...(_More) == 0)                                   \ 
 1537            if constexpr (is_same_v<_Tp, _RetTp>)                              \ 
 1538              return __x._M_apply_per_chunk(                                   \ 
 1539                       [](auto __impl, auto __xx)                              \ 
 1540                         constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA          \ 
 1542                         using _V = typename decltype(__impl)::simd_type;      \ 
 1543                         return __data(__name(_V(__private_init, __xx)));      \ 
 1546              return __optimize_simd_tuple(                                    \ 
 1547                       __x.template _M_apply_r<_RetTp>(                        \ 
 1548                         [](auto __impl, auto __xx)                            \ 
 1549                           _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA                  \ 
 1550                         { return __impl._S_##__name(__xx); }));               \ 
 1552        else if constexpr (                                                    \ 
 1555            _RetTp> && (... && is_same_v<_SimdTuple<_Tp, _As...>, _More>) )    \ 
 1556          return __x._M_apply_per_chunk(                                       \ 
 1557                   [](auto __impl, auto __xx, auto... __pack)                  \ 
 1558                     constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA              \ 
 1560                     using _V = typename decltype(__impl)::simd_type;          \ 
 1561                     return __data(__name(_V(__private_init, __xx),            \ 
 1562                                          _V(__private_init, __pack)...));     \ 
 1564        else if constexpr (is_same_v<_Tp, _RetTp>)                             \ 
 1565          return __x._M_apply_per_chunk(                                       \ 
 1566                   [](auto __impl, auto __xx, auto... __pack)                  \ 
 1567                     constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA              \ 
 1569                     using _V = typename decltype(__impl)::simd_type;          \ 
 1570                     return __data(__name(_V(__private_init, __xx),            \ 
 1571                                          __autocvt_to_simd(__pack)...));      \ 
 1574          __assert_unreachable<_Tp>();                                         \ 
 1577    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acos)
 
 1578    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asin)
 
 1579    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan)
 
 1580    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan2)
 
 1581    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cos)
 
 1582    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sin)
 
 1583    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tan)
 
 1584    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acosh)
 
 1585    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asinh)
 
 1586    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atanh)
 
 1587    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cosh)
 
 1588    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sinh)
 
 1589    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tanh)
 
 1590    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp)
 
 1591    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp2)
 
 1592    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, expm1)
 
 1593    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
int, ilogb)
 
 1594    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log)
 
 1595    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log10)
 
 1596    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log1p)
 
 1597    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log2)
 
 1598    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, logb)
 
 1600    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp,
 
 1602    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, scalbln)
 
 1603    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cbrt)
 
 1604    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, abs)
 
 1605    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fabs)
 
 1606    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, pow)
 
 1607    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sqrt)
 
 1608    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erf)
 
 1609    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erfc)
 
 1610    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, lgamma)
 
 1611    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tgamma)
 
 1612    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, trunc)
 
 1613    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ceil)
 
 1614    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, floor)
 
 1615    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nearbyint)
 
 1617    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, rint)
 
 1618    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
long, lrint)
 
 1619    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
long long, llrint)
 
 1621    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, round)
 
 1622    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
long, lround)
 
 1623    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
long long, llround)
 
 1625    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ldexp)
 
 1626    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmod)
 
 1627    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, remainder)
 
 1628    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, copysign)
 
 1629    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nextafter)
 
 1630    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fdim)
 
 1631    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmax)
 
 1632    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmin)
 
 1633    _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fma)
 
 1634    _GLIBCXX_SIMD_APPLY_ON_TUPLE(
int, fpclassify)
 
 1635#undef _GLIBCXX_SIMD_APPLY_ON_TUPLE 
 1637    template <
typename _Tp, 
typename... _Abis>
 
 1638      static inline _SimdTuple<_Tp, _Abis...>
 
 1639      _S_remquo(
const _SimdTuple<_Tp, _Abis...>& __x, 
const _SimdTuple<_Tp, _Abis...>& __y,
 
 1640                __fixed_size_storage_t<
int, _SimdTuple<_Tp, _Abis...>::_S_size()>* __z)
 
 1642        return __x._M_apply_per_chunk(
 
 1643                 [](
auto __impl, 
const auto __xx, 
const auto __yy, 
auto& __zz)
 
 1644                   _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
 
 1645                 { 
return __impl._S_remquo(__xx, __yy, &__zz); },
 
 1649    template <
typename _Tp, 
typename... _As>
 
 1650      static inline _SimdTuple<_Tp, _As...>
 
 1651      _S_frexp(
const _SimdTuple<_Tp, _As...>& __x,
 
 1652               __fixed_size_storage_t<int, _Np>& __exp) 
noexcept 
 1654        return __x._M_apply_per_chunk(
 
 1655                 [](
auto __impl, 
const auto& __a, 
auto& __b) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1656                   return __data(frexp(
typename decltype(__impl)::simd_type(__private_init, __a),
 
 1657                                       __autocvt_to_simd(__b)));
 
 1661#define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_)                                              \ 
 1662    template <typename _Tp, typename... _As>                                             \ 
 1663      static inline _MaskMember                                                          \ 
 1664      _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept                            \ 
 1666        return _M_test([] (auto __impl, auto __xx) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA  { \ 
 1667                 return __impl._S_##name_(__xx);                                         \ 
 1671    _GLIBCXX_SIMD_TEST_ON_TUPLE_(isinf)
 
 1672    _GLIBCXX_SIMD_TEST_ON_TUPLE_(isfinite)
 
 1673    _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnan)
 
 1674    _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnormal)
 
 1675    _GLIBCXX_SIMD_TEST_ON_TUPLE_(signbit)
 
 1676#undef _GLIBCXX_SIMD_TEST_ON_TUPLE_ 
 1679    template <
typename... _Ts>
 
 1680      _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 1681      _S_increment(_SimdTuple<_Ts...>& __x)
 
 1684          __x, [](
auto __meta, 
auto& native) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1685            __meta._S_increment(native);
 
 1689    template <
typename... _Ts>
 
 1690      _GLIBCXX_SIMD_INTRINSIC 
static constexpr void 
 1691      _S_decrement(_SimdTuple<_Ts...>& __x)
 
 1694          __x, [](
auto __meta, 
auto& native) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1695            __meta._S_decrement(native);
 
 1700#define _GLIBCXX_SIMD_CMP_OPERATIONS(__cmp)                                    \ 
 1701    template <typename _Tp, typename... _As>                                   \ 
 1702      _GLIBCXX_SIMD_INTRINSIC constexpr static _MaskMember                     \ 
 1703      __cmp(const _SimdTuple<_Tp, _As...>& __x,                                \ 
 1704            const _SimdTuple<_Tp, _As...>& __y)                                \ 
 1706        return _M_test([](auto __impl, auto __xx, auto __yy)                   \ 
 1707                         constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA          \ 
 1708                       { return __impl.__cmp(__xx, __yy); },                   \ 
 1712    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_equal_to)
 
 1713    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_not_equal_to)
 
 1714    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less)
 
 1715    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less_equal)
 
 1716    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isless)
 
 1717    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessequal)
 
 1718    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreater)
 
 1719    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreaterequal)
 
 1720    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessgreater)
 
 1721    _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isunordered)
 
 1722#undef _GLIBCXX_SIMD_CMP_OPERATIONS 
 1725    template <
typename _Tp, 
typename... _As, 
typename _Up>
 
 1726      _GLIBCXX_SIMD_INTRINSIC 
static void 
 1727      _S_set(_SimdTuple<_Tp, _As...>& __v, 
int __i, _Up&& __x) 
noexcept 
 1728      { __v._M_set(__i, 
static_cast<_Up&&
>(__x)); }
 
 1731    template <
typename _Tp, 
typename... _As>
 
 1732      _GLIBCXX_SIMD_INTRINSIC 
static void 
 1733      _S_masked_assign(
const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
 
 1734                       const __type_identity_t<_SimdTuple<_Tp, _As...>>& __rhs)
 
 1736        __for_each(__lhs, __rhs,
 
 1737                   [&](
auto __meta, 
auto& __native_lhs, 
auto __native_rhs)
 
 1738                     constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
 
 1740                     __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
 
 1747    template <
typename _Tp, 
typename... _As>
 
 1748      _GLIBCXX_SIMD_INTRINSIC 
static void 
 1749      _S_masked_assign(
const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
 
 1750                       const __type_identity_t<_Tp> __rhs)
 
 1753          __lhs, [&](
auto __meta, 
auto& __native_lhs) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1754            __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
 
 1760    template <
typename _Op, 
typename _Tp, 
typename... _As>
 
 1762      _S_masked_cassign(
const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
 
 1763                        const _SimdTuple<_Tp, _As...>& __rhs, _Op __op)
 
 1765        __for_each(__lhs, __rhs,
 
 1766                   [&](
auto __meta, 
auto& __native_lhs, 
auto __native_rhs)
 
 1767                     constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
 
 1769                     __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
 
 1770                                                       __native_lhs, __native_rhs, __op);
 
 1776    template <
typename _Op, 
typename _Tp, 
typename... _As>
 
 1778      _S_masked_cassign(
const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
 
 1779                        const _Tp& __rhs, _Op __op)
 
 1782          __lhs, [&](
auto __meta, 
auto& __native_lhs) 
constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1783            __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
 
 1784                                              __native_lhs, __rhs, __op);
 
 1789    template <
template <
typename> 
class _Op, 
typename _Tp, 
typename... _As>
 
 1790      static inline _SimdTuple<_Tp, _As...>
 
 1791      _S_masked_unary(
const _MaskMember __bits, 
const _SimdTuple<_Tp, _As...>& __v)
 
 1793        return __v._M_apply_wrapped([&__bits](
auto __meta,
 
 1794                                              auto __native) 
constexpr {
 
 1795          return __meta.template _S_masked_unary<_Op>(__meta._S_make_mask(
 
 1805template <
int _Np, 
typename>
 
 1806  struct _MaskImplFixedSize
 
 1809      sizeof(_ULLong) * __CHAR_BIT__ >= _Np,
 
 1810      "The fixed_size implementation relies on one _ULLong being able to store " 
 1811      "all boolean elements."); 
 
 1814    using _Abi = simd_abi::fixed_size<_Np>;
 
 1816    using _MaskMember = _SanitizedBitMask<_Np>;
 
 1818    template <
typename _Tp>
 
 1819      using _FirstAbi = 
typename __fixed_size_storage_t<_Tp, _Np>::_FirstAbi;
 
 1821    template <
typename _Tp>
 
 1822      using _TypeTag = _Tp*;
 
 1827      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
 1828      _S_broadcast(
bool __x)
 
 1829      { 
return __x ? ~_MaskMember() : _MaskMember(); }
 
 1834      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
 1835      _S_load(
const bool* __mem)
 
 1837        using _Ip = __int_for_sizeof_t<bool>;
 
 1841        const simd<_Ip, _Abi> __bools(
reinterpret_cast<const __may_alias<_Ip>*
>(
 
 1844        return __data(__bools != 0);
 
 1849    template <
bool _Sanitized>
 
 1850      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _SanitizedBitMask<_Np>
 
 1851      _S_to_bits(_BitMask<_Np, _Sanitized> __x)
 
 1853        if constexpr (_Sanitized)
 
 1856          return __x._M_sanitized();
 
 1861    template <
typename _Tp, 
typename _Up, 
typename _UAbi>
 
 1862      _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
 1863      _S_convert(simd_mask<_Up, _UAbi> __x)
 
 1865        return _UAbi::_MaskImpl::_S_to_bits(__data(__x))
 
 1866          .template _M_extract<0, _Np>();
 
 1871    template <
typename _Tp>
 
 1872      _GLIBCXX_SIMD_INTRINSIC 
static _MaskMember
 
 1873      _S_from_bitmask(_MaskMember __bits, _TypeTag<_Tp>) 
noexcept 
 1877    static inline _MaskMember
 
 1878    _S_load(
const bool* __mem) 
noexcept 
 1883      using _Vs = __fixed_size_storage_t<_UChar, _Np>;
 
 1884      __for_each(_Vs{}, [&](
auto __meta, 
auto) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1885        __r |= __meta._S_mask_to_shifted_ullong(
 
 1886          __meta._S_mask_impl._S_load(&__mem[__meta._S_offset],
 
 1887                                      _SizeConstant<__meta._S_size()>()));
 
 1893    static inline _MaskMember
 
 1894    _S_masked_load(_MaskMember __merge, _MaskMember __mask, 
const bool* __mem) 
noexcept 
 1896      _BitOps::_S_bit_iteration(__mask.to_ullong(),
 
 1897                                [&](
auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
 
 1898                                  __merge.set(__i, __mem[__i]);
 
 1905    _S_store(
const _MaskMember __bitmask, 
bool* __mem) 
noexcept 
 1907      if constexpr (_Np == 1)
 
 1908        __mem[0] = __bitmask[0];
 
 1910        _FirstAbi<_UChar>::_CommonImpl::_S_store_bool_array(__bitmask, __mem);
 
 1915    _S_masked_store(
const _MaskMember __v, 
bool* __mem, 
const _MaskMember __k) 
noexcept 
 1917      _BitOps::_S_bit_iteration(
 
 1918        __k, [&](
auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { __mem[__i] = __v[__i]; });
 
 1922    _GLIBCXX_SIMD_INTRINSIC 
static _MaskMember
 
 1923    _S_logical_and(
const _MaskMember& __x, 
const _MaskMember& __y) 
noexcept 
 1924    { 
return __x & __y; }
 
 1926    _GLIBCXX_SIMD_INTRINSIC 
static _MaskMember
 
 1927    _S_logical_or(
const _MaskMember& __x, 
const _MaskMember& __y) 
noexcept 
 1928    { 
return __x | __y; }
 
 1930    _GLIBCXX_SIMD_INTRINSIC 
static constexpr _MaskMember
 
 1931    _S_bit_not(
const _MaskMember& __x) 
noexcept 
 1934    _GLIBCXX_SIMD_INTRINSIC 
static _MaskMember
 
 1935    _S_bit_and(
const _MaskMember& __x, 
const _MaskMember& __y) 
noexcept 
 1936    { 
return __x & __y; }
 
 1938    _GLIBCXX_SIMD_INTRINSIC 
static _MaskMember
 
 1939    _S_bit_or(
const _MaskMember& __x, 
const _MaskMember& __y) 
noexcept 
 1940    { 
return __x | __y; }
 
 1942    _GLIBCXX_SIMD_INTRINSIC 
static _MaskMember
 
 1943    _S_bit_xor(
const _MaskMember& __x, 
const _MaskMember& __y) 
noexcept 
 1944    { 
return __x ^ __y; }
 
 1947    _GLIBCXX_SIMD_INTRINSIC 
static void 
 1948    _S_set(_MaskMember& __k, 
int __i, 
bool __x) 
noexcept 
 1949    { __k.set(__i, __x); }
 
 1952    _GLIBCXX_SIMD_INTRINSIC 
static void 
 1953    _S_masked_assign(
const _MaskMember __k, _MaskMember& __lhs, 
const _MaskMember __rhs)
 
 1954    { __lhs = (__lhs & ~__k) | (__rhs & __k); }
 
 1957    _GLIBCXX_SIMD_INTRINSIC 
static void 
 1958    _S_masked_assign(
const _MaskMember __k, _MaskMember& __lhs, 
const bool __rhs)
 
 1968    template <
typename _Tp>
 
 1969      _GLIBCXX_SIMD_INTRINSIC 
static bool 
 1970      _S_all_of(simd_mask<_Tp, _Abi> __k)
 
 1971      { 
return __data(__k).all(); }
 
 1975    template <
typename _Tp>
 
 1976      _GLIBCXX_SIMD_INTRINSIC 
static bool 
 1977      _S_any_of(simd_mask<_Tp, _Abi> __k)
 
 1978      { 
return __data(__k).any(); }
 
 1982    template <
typename _Tp>
 
 1983      _GLIBCXX_SIMD_INTRINSIC 
static bool 
 1984      _S_none_of(simd_mask<_Tp, _Abi> __k)
 
 1985      { 
return __data(__k).none(); }
 
 1989    template <
typename _Tp>
 
 1990      _GLIBCXX_SIMD_INTRINSIC 
static bool 
 1991      _S_some_of([[maybe_unused]] simd_mask<_Tp, _Abi> __k)
 
 1993        if constexpr (_Np == 1)
 
 1996          return __data(__k).any() && !__data(__k).all();
 
 2001    template <
typename _Tp>
 
 2002      _GLIBCXX_SIMD_INTRINSIC 
static int 
 2003      _S_popcount(simd_mask<_Tp, _Abi> __k)
 
 2004      { 
return __data(__k).count(); }
 
 2008    template <
typename _Tp>
 
 2009      _GLIBCXX_SIMD_INTRINSIC 
static int 
 2010      _S_find_first_set(simd_mask<_Tp, _Abi> __k)
 
 2011      { 
return std::__countr_zero(__data(__k).to_ullong()); }
 
 2015    template <
typename _Tp>
 
 2016      _GLIBCXX_SIMD_INTRINSIC 
static int 
 2017      _S_find_last_set(simd_mask<_Tp, _Abi> __k)
 
 2018      { 
return std::__bit_width(__data(__k).to_ullong()) - 1; }
 
 2024_GLIBCXX_SIMD_END_NAMESPACE
 
// (Doxygen cross-reference residue, preserved as comments:)
// constexpr _Tp reduce(_InputIterator __first, _InputIterator __last, _Tp __init, _BinaryOperation __binary_op)
//   — calculates the reduction of values in a range.
// constexpr _Iterator __base(_Iterator __it)