#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#if __cplusplus > 201703L
  /// Enumeration for memory_order
  enum class memory_order : int
    { relaxed, consume, acquire, release, acq_rel, seq_cst };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  /// Enumeration for memory_order
  typedef enum memory_order
    { memory_order_relaxed, memory_order_consume, memory_order_acquire,
      memory_order_release, memory_order_acq_rel, memory_order_seq_cst }
    memory_order;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  // Drop release ordering as per [atomics.types.operations.req]/21.
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
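  // Illustrative usage sketch (not part of this header): pairing a release
  // fence with an acquire fence over a relaxed flag.  The names g_data and
  // g_ready below are hypothetical example variables.
  //
  //   #include <atomic>
  //
  //   int g_data;
  //   std::atomic<bool> g_ready{false};
  //
  //   void producer() {
  //     g_data = 42;
  //     std::atomic_thread_fence(std::memory_order_release);
  //     g_ready.store(true, std::memory_order_relaxed);
  //   }
  //
  //   void consumer() {
  //     while (!g_ready.load(std::memory_order_relaxed)) { }
  //     std::atomic_thread_fence(std::memory_order_acquire);
  //     // g_data is now guaranteed to read 42.
  //   }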
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#if __cplusplus <= 201703L
# define _GLIBCXX20_INIT(I)
#else
# define __cpp_lib_atomic_value_initialization 201911L
# define _GLIBCXX20_INIT(I) = I
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }
  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }
#if __cplusplus > 201703L
#define __cpp_lib_atomic_flag_test 201907L

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#if __cpp_lib_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __cpp_lib_atomic_wait
#endif // C++20
    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }
  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
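  // Illustrative usage sketch (not part of this header): a minimal spinlock
  // built on std::atomic_flag.  The class name SpinLock is a hypothetical
  // example, not a library type.
  //
  //   #include <atomic>
  //
  //   class SpinLock {
  //     std::atomic_flag _flag;   // clear on default construction (C++20)
  //   public:
  //     void lock() {
  //       while (_flag.test_and_set(std::memory_order_acquire)) {
  //         // C++20: block instead of spinning until the flag is cleared.
  //         _flag.wait(true, std::memory_order_relaxed);
  //       }
  //     }
  //     void unlock() {
  //       _flag.clear(std::memory_order_release);
  //       _flag.notify_one();
  //     }
  //   };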
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      { store(__i); return __i; }

      __int_type
      operator=(__int_type __i) volatile noexcept
      { store(__i); return __i; }
      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_exchange_n(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_exchange_n(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }
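      // Illustrative usage sketch (not part of this header): the canonical
      // compare_exchange_weak retry loop.  fetch_max is a hypothetical
      // helper, not a library function.
      //
      //   #include <atomic>
      //
      //   int fetch_max(std::atomic<int>& a, int value) {
      //     int old = a.load(std::memory_order_relaxed);
      //     // On failure, compare_exchange_weak reloads `old`, so the loop
      //     // retries with the fresh value; spurious failures are harmless.
      //     while (old < value
      //            && !a.compare_exchange_weak(old, value,
      //                                        std::memory_order_release,
      //                                        std::memory_order_relaxed))
      //       { }
      //     return old;   // previous value
      //   }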
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
                                     [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
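  // Illustrative usage sketch (not part of this header): __atomic_base is the
  // implementation base of std::atomic<integral>; user code goes through the
  // std::atomic interface.  The counter below is a hypothetical example.
  //
  //   #include <atomic>
  //
  //   std::atomic<unsigned> hits{0};
  //
  //   void record_hit() {
  //     // fetch_add returns the previous value; relaxed order is enough
  //     // for a statistics counter with no dependent data.
  //     hits.fetch_add(1, std::memory_order_relaxed);
  //   }
  //
  //   unsigned snapshot() { return hits.load(std::memory_order_relaxed); }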
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      { store(__p); return __p; }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      { store(__p); return __p; }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }
      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_exchange_n(&_M_p, __p, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_exchange_n(&_M_p, __p, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
                                     [__m, this]
                                     { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
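  // Illustrative usage sketch (not part of this header): pointer atomics
  // scale arithmetic by sizeof(T), exactly like raw pointer arithmetic
  // (see _M_type_size above).  The names buffer and cursor are hypothetical.
  //
  //   #include <atomic>
  //
  //   int buffer[64];
  //   std::atomic<int*> cursor{buffer};
  //
  //   int* claim_slot() {
  //     // Returns the old pointer and advances the cursor by one int.
  //     return cursor.fetch_add(1, std::memory_order_relaxed);
  //   }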
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
        return *__dest;
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));

        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), true,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));

        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), false,
                                         int(__success), int(__failure));
      }
#if __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
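  // Illustrative usage sketch (not part of this header): the CAS loop used by
  // __fetch_add_flt above is the same pattern user code needs for any
  // read-modify-write with no dedicated hardware instruction.  atomic_scale
  // is a hypothetical helper, not a library function.
  //
  //   #include <atomic>
  //
  //   double atomic_scale(std::atomic<double>& a, double factor) {
  //     double old = a.load(std::memory_order_relaxed);
  //     double desired = old * factor;
  //     // On failure `old` is refreshed with the current value, so the
  //     // product is recomputed before retrying.
  //     while (!a.compare_exchange_weak(old, desired,
  //                                     std::memory_order_seq_cst,
  //                                     std::memory_order_relaxed))
  //       desired = old * factor;
  //     return old;
  //   }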
  // Base class for atomic<floating-point-type>.
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;
      _Fp
      operator=(_Fp __t) volatile noexcept
      { this->store(__t); return __t; }

      _Fp
      operator=(_Fp __t) noexcept
      { this->store(__t); return __t; }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }
      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      { return __atomic_impl::compare_exchange_weak(&_M_fp, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      { return __atomic_impl::compare_exchange_weak(&_M_fp, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      { return __atomic_impl::compare_exchange_strong(&_M_fp, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      { return __atomic_impl::compare_exchange_strong(&_M_fp, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst) noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst) volatile noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst) noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst) volatile noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order)); }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __cpp_lib_atomic_wait
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
#undef _GLIBCXX20_INIT
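  // Illustrative usage sketch (not part of this header): __atomic_float backs
  // std::atomic<double> and friends in C++20.  The accumulator below is a
  // hypothetical example.
  //
  //   #include <atomic>
  //
  //   std::atomic<double> total{0.0};
  //
  //   void add_sample(double v) {
  //     // Implemented as a compare_exchange_weak loop (see __fetch_add_flt),
  //     // since most targets lack a native floating-point fetch_add.
  //     total.fetch_add(v, std::memory_order_relaxed);
  //   }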
  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;
  // Base class for non-integral, non-floating-point, non-pointer types.
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      { this->store(__t); return __t; }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_weak(_M_ptr, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_strong(_M_ptr, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst) const noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst) const noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order)); }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

    private:
      _Tp* _M_ptr;
    };
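  // Illustrative usage sketch (not part of this header): std::atomic_ref
  // (C++20) performs atomic operations on an object it does not own.  The
  // struct Point, variable g_point and function bump are hypothetical.
  //
  //   #include <atomic>
  //
  //   struct Point { int x; int y; };   // trivially copyable, 8 bytes
  //
  //   alignas(std::atomic_ref<Point>::required_alignment) Point g_point{0, 0};
  //
  //   void bump() {
  //     std::atomic_ref<Point> ref(g_point);
  //     Point old = ref.load(std::memory_order_relaxed);
  //     Point next{old.x + 1, old.y + 1};
  //     while (!ref.compare_exchange_weak(old, next))
  //       next = {old.x + 1, old.y + 1};
  //   }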
  // Base class for atomic_ref<integral-type>.
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;
      _Tp
      operator=(_Tp __t) const noexcept
      { this->store(__t); return __t; }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_weak(_M_ptr, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_strong(_M_ptr, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst) const noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst) const noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order)); }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
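  // Illustrative usage sketch (not part of this header): the integral
  // atomic_ref specialization adds fetch_* and the compound operators.
  // parallel_count is a hypothetical example function.
  //
  //   #include <atomic>
  //   #include <cstddef>
  //   #include <vector>
  //
  //   long parallel_count(std::vector<long>& hist, std::size_t bucket) {
  //     // Several threads may wrap the same element; the underlying object
  //     // is a plain long that remains usable non-atomically elsewhere.
  //     std::atomic_ref<long> slot(hist[bucket]);
  //     return slot.fetch_add(1, std::memory_order_relaxed);
  //   }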
  // Base class for atomic_ref<floating-point-type>.
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;
      _Fp
      operator=(_Fp __t) const noexcept
      { this->store(__t); return __t; }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_weak(_M_ptr, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_strong(_M_ptr, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst) const noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst) const noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order)); }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // Base class for atomic_ref<pointer-type>.
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;
      _Tp*
      operator=(_Tp* __t) const noexcept
      { this->store(__t); return __t; }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>(); }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_weak(_M_ptr, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      { return __atomic_impl::compare_exchange_strong(_M_ptr, __expected,
                   __desired, __success, __failure); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst) const noexcept
      { return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order)); }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst) const noexcept
      { return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order)); }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1)); }

      value_type
      operator+=(difference_type __d) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d)); }

      value_type
      operator-=(difference_type __d) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d)); }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
#endif // C++2a
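  // Illustrative usage sketch (not part of this header): atomic_ref over a
  // plain pointer variable; arithmetic is scaled by sizeof(_Tp) through
  // _S_type_size above.  buf, scan_pos and next_token are hypothetical.
  //
  //   #include <atomic>
  //   #include <cstddef>
  //
  //   char buf[1024];
  //   char* scan_pos = buf;
  //
  //   char* next_token(std::size_t len) {
  //     std::atomic_ref<char*> pos(scan_pos);
  //     // Atomically advance the shared cursor by len characters and
  //     // return the start of the claimed range.
  //     return pos.fetch_add(static_cast<std::ptrdiff_t>(len));
  //   }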
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H