30#ifndef _GLIBCXX_ATOMIC_WAIT_H
31#define _GLIBCXX_ATOMIC_WAIT_H 1
33#pragma GCC system_header
36#if defined _GLIBCXX_HAS_GTHREADS || defined _GLIBCXX_HAVE_LINUX_FUTEX
41#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
51#define __cpp_lib_atomic_wait 201907L
53namespace std _GLIBCXX_VISIBILITY(default)
55_GLIBCXX_BEGIN_NAMESPACE_VERSION
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
#define _GLIBCXX_HAVE_PLATFORM_WAIT 1
    // Linux futexes operate on a naturally aligned 32-bit int.
    using __platform_wait_t = int;
    static constexpr size_t __platform_wait_alignment = 4;
#else
    // No platform primitive: fall back to a proxy word.  uint64_t with its
    // natural alignment is assumed here — confirm against the target ABI.
    using __platform_wait_t = uint64_t;
    static constexpr size_t __platform_wait_alignment
      = __alignof__(__platform_wait_t);
#endif
73 template<
typename _Tp>
74 inline constexpr bool __platform_wait_uses_type
75#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
77 && ((
sizeof(_Tp) ==
sizeof(__detail::__platform_wait_t))
78 && (
alignof(_Tp*) >= __detail::__platform_wait_alignment));
85#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
86 enum class __futex_wait_flags :
int
88#ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
97 __wait_private = __wait | __private_flag,
98 __wake_private = __wake | __private_flag,
99 __wait_bitset_private = __wait_bitset | __private_flag,
100 __wake_bitset_private = __wake_bitset | __private_flag,
101 __bitset_match_any = -1
104 template<
typename _Tp>
106 __platform_wait(
const _Tp* __addr, __platform_wait_t __val)
noexcept
108 auto __e = syscall (SYS_futex,
static_cast<const void*
>(__addr),
109 static_cast<int>(__futex_wait_flags::__wait_private),
111 if (!__e || errno == EAGAIN)
114 __throw_system_error(errno);
117 template<
typename _Tp>
119 __platform_notify(
const _Tp* __addr,
bool __all)
noexcept
121 syscall (SYS_futex,
static_cast<const void*
>(__addr),
122 static_cast<int>(__futex_wait_flags::__wake_private),
123 __all ? INT_MAX : 1);
128 __thread_yield() noexcept
130#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
136 __thread_relax() noexcept
138#if defined __i386__ || defined __x86_64__
139 __builtin_ia32_pause();
145 constexpr auto __atomic_spin_count_relax = 12;
146 constexpr auto __atomic_spin_count = 16;
148 struct __default_spin_policy
151 operator()() const noexcept
155 template<
typename _Pred,
156 typename _Spin = __default_spin_policy>
158 __atomic_spin(_Pred& __pred, _Spin __spin = _Spin{ })
noexcept
160 for (
auto __i = 0; __i < __atomic_spin_count; ++__i)
165 if (__i < __atomic_spin_count_relax)
166 __detail::__thread_relax();
168 __detail::__thread_yield();
181 template<
typename _Tp>
182 bool __atomic_compare(
const _Tp& __a,
const _Tp& __b)
185 return __builtin_memcmp(&__a, &__b,
sizeof(_Tp)) == 0;
188 struct __waiter_pool_base
190#ifdef __cpp_lib_hardware_interference_size
191 static constexpr auto _S_align = hardware_destructive_interference_size;
193 static constexpr auto _S_align = 64;
196 alignas(_S_align) __platform_wait_t _M_wait = 0;
198#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
202 alignas(_S_align) __platform_wait_t _M_ver = 0;
204#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
207 __waiter_pool_base() =
default;
210 _M_enter_wait() noexcept
211 { __atomic_fetch_add(&_M_wait, 1, __ATOMIC_SEQ_CST); }
214 _M_leave_wait() noexcept
215 { __atomic_fetch_sub(&_M_wait, 1, __ATOMIC_RELEASE); }
218 _M_waiting() const noexcept
220 __platform_wait_t __res;
221 __atomic_load(&_M_wait, &__res, __ATOMIC_SEQ_CST);
226 _M_notify(__platform_wait_t* __addr, [[maybe_unused]]
bool __all,
227 bool __bare)
noexcept
229#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
230 if (__addr == &_M_ver)
232 __atomic_fetch_add(__addr, 1, __ATOMIC_SEQ_CST);
236 if (__bare || _M_waiting())
237 __platform_notify(__addr, __all);
240 lock_guard<mutex> __l(_M_mtx);
241 __atomic_fetch_add(__addr, 1, __ATOMIC_RELAXED);
243 if (__bare || _M_waiting())
248 static __waiter_pool_base&
249 _S_for(
const void* __addr)
noexcept
251 constexpr uintptr_t __ct = 16;
252 static __waiter_pool_base __w[__ct];
253 auto __key = (uintptr_t(__addr) >> 2) % __ct;
258 struct __waiter_pool : __waiter_pool_base
261 _M_do_wait(
const __platform_wait_t* __addr, __platform_wait_t __old)
noexcept
263#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
264 __platform_wait(__addr, __old);
266 __platform_wait_t __val;
267 __atomic_load(__addr, &__val, __ATOMIC_SEQ_CST);
270 lock_guard<mutex> __l(_M_mtx);
271 __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
279 template<
typename _Tp>
282 using __waiter_type = _Tp;
285 __platform_wait_t* _M_addr;
287 template<
typename _Up>
288 static __platform_wait_t*
289 _S_wait_addr(
const _Up* __a, __platform_wait_t* __b)
291 if constexpr (__platform_wait_uses_type<_Up>)
292 return reinterpret_cast<__platform_wait_t*
>(
const_cast<_Up*
>(__a));
297 static __waiter_type&
298 _S_for(
const void* __addr)
noexcept
300 static_assert(
sizeof(__waiter_type) ==
sizeof(__waiter_pool_base));
301 auto& res = __waiter_pool_base::_S_for(__addr);
302 return reinterpret_cast<__waiter_type&
>(res);
305 template<
typename _Up>
306 explicit __waiter_base(
const _Up* __addr) noexcept
307 : _M_w(_S_for(__addr))
308 , _M_addr(_S_wait_addr(__addr, &_M_w._M_ver))
312 _M_notify(
bool __all,
bool __bare =
false) noexcept
313 { _M_w._M_notify(_M_addr, __all, __bare); }
315 template<
typename _Up,
typename _ValFn,
316 typename _Spin = __default_spin_policy>
318 _S_do_spin_v(__platform_wait_t* __addr,
319 const _Up& __old, _ValFn __vfn,
320 __platform_wait_t& __val,
321 _Spin __spin = _Spin{ })
323 auto const __pred = [=]
324 {
return !__detail::__atomic_compare(__old, __vfn()); };
326 if constexpr (__platform_wait_uses_type<_Up>)
328 __builtin_memcpy(&__val, &__old,
sizeof(__val));
332 __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
334 return __atomic_spin(__pred, __spin);
337 template<
typename _Up,
typename _ValFn,
338 typename _Spin = __default_spin_policy>
340 _M_do_spin_v(
const _Up& __old, _ValFn __vfn,
341 __platform_wait_t& __val,
342 _Spin __spin = _Spin{ })
343 {
return _S_do_spin_v(_M_addr, __old, __vfn, __val, __spin); }
345 template<
typename _Pred,
346 typename _Spin = __default_spin_policy>
348 _S_do_spin(
const __platform_wait_t* __addr,
350 __platform_wait_t& __val,
351 _Spin __spin = _Spin{ })
353 __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
354 return __atomic_spin(__pred, __spin);
357 template<
typename _Pred,
358 typename _Spin = __default_spin_policy>
360 _M_do_spin(_Pred __pred, __platform_wait_t& __val,
361 _Spin __spin = _Spin{ })
362 {
return _S_do_spin(_M_addr, __pred, __val, __spin); }
365 template<
typename _EntersWait>
366 struct __waiter : __waiter_base<__waiter_pool>
368 using __base_type = __waiter_base<__waiter_pool>;
370 template<
typename _Tp>
371 explicit __waiter(
const _Tp* __addr) noexcept
372 : __base_type(__addr)
374 if constexpr (_EntersWait::value)
375 _M_w._M_enter_wait();
380 if constexpr (_EntersWait::value)
381 _M_w._M_leave_wait();
384 template<
typename _Tp,
typename _ValFn>
386 _M_do_wait_v(_Tp __old, _ValFn __vfn)
390 __platform_wait_t __val;
391 if (__base_type::_M_do_spin_v(__old, __vfn, __val))
393 __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
395 while (__detail::__atomic_compare(__old, __vfn()));
398 template<
typename _Pred>
400 _M_do_wait(_Pred __pred)
noexcept
404 __platform_wait_t __val;
405 if (__base_type::_M_do_spin(__pred, __val))
407 __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
413 using __enters_wait = __waiter<std::true_type>;
414 using __bare_wait = __waiter<std::false_type>;
417 template<
typename _Tp,
typename _ValFn>
419 __atomic_wait_address_v(
const _Tp* __addr, _Tp __old,
420 _ValFn __vfn)
noexcept
422 __detail::__enters_wait __w(__addr);
423 __w._M_do_wait_v(__old, __vfn);
426 template<
typename _Tp,
typename _Pred>
428 __atomic_wait_address(
const _Tp* __addr, _Pred __pred)
noexcept
430 __detail::__enters_wait __w(__addr);
431 __w._M_do_wait(__pred);
435 template<
typename _Pred>
437 __atomic_wait_address_bare(
const __detail::__platform_wait_t* __addr,
438 _Pred __pred)
noexcept
440#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
443 __detail::__platform_wait_t __val;
444 if (__detail::__bare_wait::_S_do_spin(__addr, __pred, __val))
446 __detail::__platform_wait(__addr, __val);
450 __detail::__bare_wait __w(__addr);
451 __w._M_do_wait(__pred);
455 template<
typename _Tp>
457 __atomic_notify_address(
const _Tp* __addr,
bool __all)
noexcept
459 __detail::__bare_wait __w(__addr);
460 __w._M_notify(__all);
465 __atomic_notify_address_bare(
const __detail::__platform_wait_t* __addr,
468#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
469 __detail::__platform_notify(__addr, __all);
471 __detail::__bare_wait __w(__addr);
472 __w._M_notify(__all,
true);
475_GLIBCXX_END_NAMESPACE_VERSION
// ISO C++ entities' top-level namespace is std.