3// Copyright (C) 2003-2023 Free Software Foundation, Inc.
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
25/** @file include/mutex
26 * This is a Standard C++ Library header.
30#define _GLIBCXX_MUTEX 1
32#pragma GCC system_header
34#include <bits/requires_hosted.h> // concurrency
36#if __cplusplus < 201103L
37# include <bits/c++0x_warning.h>
43#include <bits/chrono.h>
44#include <bits/error_constants.h>
45#include <bits/std_mutex.h>
46#include <bits/unique_lock.h>
47#if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
48# include <condition_variable>
51#include <ext/atomicity.h> // __gnu_cxx::__is_single_threaded
53#if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
54# include <bits/std_function.h> // std::function
57namespace std _GLIBCXX_VISIBILITY(default)
59_GLIBCXX_BEGIN_NAMESPACE_VERSION
66#ifdef _GLIBCXX_HAS_GTHREADS
67 /// @cond undocumented
69 // Common base class for std::recursive_mutex and std::recursive_timed_mutex
70 class __recursive_mutex_base
// NOTE(review): this extract elides lines (access specifiers, the #else/#endif
// of the init #ifdef, and the closing brace are not visible here).
73 typedef __gthread_recursive_mutex_t __native_type;
75 __recursive_mutex_base(const __recursive_mutex_base&) = delete;
76 __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;
// When a static initializer exists, default-construct the native mutex in-class.
78#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
79 __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;
81 __recursive_mutex_base() = default;
// Otherwise the constructor must call the init function at run time.
83 __native_type _M_mutex;
85 __recursive_mutex_base()
87 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
88 __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
// Destroy the native mutex; destruction errors are intentionally ignored.
91 ~__recursive_mutex_base()
92 { __gthread_recursive_mutex_destroy(&_M_mutex); }
97 /** The standard recursive mutex type.
99 * A recursive mutex can be locked more than once by the same thread.
100 * Other threads cannot lock the mutex until the owning thread unlocks it
101 * as many times as it was locked.
106 class recursive_mutex : private __recursive_mutex_base
// NOTE(review): the lock()/try_lock()/unlock() signature lines are elided in
// this extract; only their bodies are visible below.
109 typedef __native_type* native_handle_type;
111 recursive_mutex() = default;
112 ~recursive_mutex() = default;
114 recursive_mutex(const recursive_mutex&) = delete;
115 recursive_mutex& operator=(const recursive_mutex&) = delete;
// lock(): throws std::system_error if the native lock call fails.
120 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
122 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
124 __throw_system_error(__e);
// try_lock(): the native call returns 0 on success, hence the negation.
131 // XXX EINVAL, EAGAIN, EBUSY
132 return !__gthread_recursive_mutex_trylock(&_M_mutex);
// unlock(): errors from the native call are ignored.
138 // XXX EINVAL, EAGAIN, EBUSY
139 __gthread_recursive_mutex_unlock(&_M_mutex);
// Expose the underlying native mutex handle.
143 native_handle() noexcept
144 { return &_M_mutex; }
147#if _GTHREAD_USE_MUTEX_TIMEDLOCK
148 /// @cond undocumented
// CRTP helper providing try-lock-for/until logic shared by timed_mutex and
// recursive_timed_mutex.  _Derived supplies _M_timedlock (and _M_clocklock
// where pthread_mutex_clocklock is available).
150 template<typename _Derived>
151 class __timed_mutex_impl
// Convert a relative timeout into an absolute one against the clock the
// native timed-lock call actually uses.
154 template<typename _Rep, typename _Period>
156 _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
158#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
159 using __clock = chrono::steady_clock;
161 using __clock = chrono::system_clock;
// Round up when the cast truncates, so we never wake before the deadline.
164 auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
165 if (ratio_greater<__clock::period, _Period>())
167 return _M_try_lock_until(__clock::now() + __rt);
// Absolute timeout on system_clock: split into seconds + nanoseconds for
// the native __gthread_time_t and delegate to the derived class.
170 template<typename _Duration>
172 _M_try_lock_until(const chrono::time_point<chrono::system_clock,
175 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
176 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
178 __gthread_time_t __ts = {
179 static_cast<std::time_t>(__s.time_since_epoch().count()),
180 static_cast<long>(__ns.count())
183 return static_cast<_Derived*>(this)->_M_timedlock(__ts);
// Absolute timeout on steady_clock: only usable when the platform supports
// locking against CLOCK_MONOTONIC directly.
186#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
187 template<typename _Duration>
189 _M_try_lock_until(const chrono::time_point<chrono::steady_clock,
192 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
193 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
195 __gthread_time_t __ts = {
196 static_cast<std::time_t>(__s.time_since_epoch().count()),
197 static_cast<long>(__ns.count())
200 return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
// Generic clock: re-check the user clock in a loop, since it may tick at a
// different rate than the clock used for the native wait.
205 template<typename _Clock, typename _Duration>
207 _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
209#if __cplusplus > 201703L
210 static_assert(chrono::is_clock_v<_Clock>);
212 // The user-supplied clock may not tick at the same rate as
213 // steady_clock, so we must loop in order to guarantee that
214 // the timeout has expired before returning false.
215 auto __now = _Clock::now();
217 auto __rtime = __atime - __now;
218 if (_M_try_lock_for(__rtime))
220 __now = _Clock::now();
221 } while (__atime > __now);
227 /** The standard timed mutex type.
229 * A non-recursive mutex that supports a timeout when trying to acquire the
// std::timed_mutex for targets with native timed-lock support.
// NOTE(review): the "class timed_mutex" head line and the member-function
// signature lines are elided in this extract.
236 : private __mutex_base, public __timed_mutex_impl<timed_mutex>
239 typedef __native_type* native_handle_type;
241 timed_mutex() = default;
242 ~timed_mutex() = default;
244 timed_mutex(const timed_mutex&) = delete;
245 timed_mutex& operator=(const timed_mutex&) = delete;
// lock(): throws std::system_error on native-lock failure.
250 int __e = __gthread_mutex_lock(&_M_mutex);
252 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
254 __throw_system_error(__e);
// try_lock(): native call returns 0 on success, hence the negation.
261 // XXX EINVAL, EAGAIN, EBUSY
262 return !__gthread_mutex_trylock(&_M_mutex);
// Timed variants simply forward to the CRTP base.
265 template <class _Rep, class _Period>
268 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
269 { return _M_try_lock_for(__rtime); }
271 template <class _Clock, class _Duration>
274 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
275 { return _M_try_lock_until(__atime); }
// unlock(): errors from the native call are ignored.
280 // XXX EINVAL, EAGAIN, EBUSY
281 __gthread_mutex_unlock(&_M_mutex);
285 native_handle() noexcept
286 { return &_M_mutex; }
// Hooks invoked by __timed_mutex_impl via the CRTP downcast.
289 friend class __timed_mutex_impl<timed_mutex>;
292 _M_timedlock(const __gthread_time_t& __ts)
293 { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }
295#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
297 _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
298 { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
302 /** The standard recursive timed mutex type.
304 * A recursive mutex that supports a timeout when trying to acquire the
305 * lock. A recursive mutex can be locked more than once by the same thread.
306 * Other threads cannot lock the mutex until the owning thread unlocks it
307 * as many times as it was locked.
// NOTE(review): member-function signature lines are elided in this extract;
// only their bodies are visible below.
312 class recursive_timed_mutex
313 : private __recursive_mutex_base,
314 public __timed_mutex_impl<recursive_timed_mutex>
317 typedef __native_type* native_handle_type;
319 recursive_timed_mutex() = default;
320 ~recursive_timed_mutex() = default;
322 recursive_timed_mutex(const recursive_timed_mutex&) = delete;
323 recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
// lock(): throws std::system_error on native-lock failure.
328 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
330 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
332 __throw_system_error(__e);
// try_lock(): native call returns 0 on success, hence the negation.
339 // XXX EINVAL, EAGAIN, EBUSY
340 return !__gthread_recursive_mutex_trylock(&_M_mutex);
// Timed variants forward to the CRTP base.
343 template <class _Rep, class _Period>
346 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
347 { return _M_try_lock_for(__rtime); }
349 template <class _Clock, class _Duration>
352 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
353 { return _M_try_lock_until(__atime); }
// unlock(): errors from the native call are ignored.
358 // XXX EINVAL, EAGAIN, EBUSY
359 __gthread_recursive_mutex_unlock(&_M_mutex);
363 native_handle() noexcept
364 { return &_M_mutex; }
// Hooks invoked by __timed_mutex_impl via the CRTP downcast.
367 friend class __timed_mutex_impl<recursive_timed_mutex>;
370 _M_timedlock(const __gthread_time_t& __ts)
371 { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }
373#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
375 _M_clocklock(clockid_t __clockid, const __gthread_time_t& __ts)
376 { return !pthread_mutex_clocklock(&_M_mutex, __clockid, &__ts); }
380#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
// Fallback std::timed_mutex for targets without native timed locking:
// emulated with a plain mutex + condition_variable guarding _M_locked.
// NOTE(review): the class head, _M_mut member, and several signature/return
// lines are elided in this extract.
386 condition_variable _M_cv;
387 bool _M_locked = false;
391 timed_mutex() = default;
// Destroying a locked mutex is undefined; assert in debug mode.
392 ~timed_mutex() { __glibcxx_assert( !_M_locked ); }
394 timed_mutex(const timed_mutex&) = delete;
395 timed_mutex& operator=(const timed_mutex&) = delete;
// lock(): block until _M_locked becomes false, then claim it.
400 unique_lock<mutex> __lk(_M_mut);
401 _M_cv.wait(__lk, [&]{ return !_M_locked; });
// try_lock(): single non-blocking check under the internal mutex.
409 lock_guard<mutex> __lk(_M_mut);
// try_lock_for(): wait_for returns false if the predicate was still false
// when the relative timeout expired.
416 template<typename _Rep, typename _Period>
419 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
421 unique_lock<mutex> __lk(_M_mut);
422 if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
// try_lock_until(): as above, with an absolute deadline.
428 template<typename _Clock, typename _Duration>
431 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
433 unique_lock<mutex> __lk(_M_mut);
434 if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
// unlock(): release under the internal mutex (and notify — elided here).
443 lock_guard<mutex> __lk(_M_mut);
444 __glibcxx_assert( _M_locked );
450 /// recursive_timed_mutex
// Fallback std::recursive_timed_mutex: emulated with a mutex +
// condition_variable, an owner thread::id and a recursion count.
// NOTE(review): several member and signature lines are elided in this extract.
451 class recursive_timed_mutex
454 condition_variable _M_cv;
456 unsigned _M_count = 0;
458 // Predicate type that tests whether the current thread can lock a mutex.
461 // Returns true if the mutex is unlocked or is locked by _M_caller.
463 operator()() const noexcept
464 { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }
466 const recursive_timed_mutex* _M_mx;
467 thread::id _M_caller;
472 recursive_timed_mutex() = default;
// Destroying a locked mutex is undefined; assert in debug mode.
473 ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }
475 recursive_timed_mutex(const recursive_timed_mutex&) = delete;
476 recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
// lock(): wait until this thread may acquire; EAGAIN if the recursion
// count would overflow, per the standard.
481 auto __id = this_thread::get_id();
482 _Can_lock __can_lock{this, __id};
483 unique_lock<mutex> __lk(_M_mut);
484 _M_cv.wait(__lk, __can_lock);
486 __throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
// try_lock(): single non-blocking check of the _Can_lock predicate.
495 auto __id = this_thread::get_id();
496 _Can_lock __can_lock{this, __id};
497 lock_guard<mutex> __lk(_M_mut);
// try_lock_for(): timed wait on the same predicate.
507 template<typename _Rep, typename _Period>
510 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
512 auto __id = this_thread::get_id();
513 _Can_lock __can_lock{this, __id};
514 unique_lock<mutex> __lk(_M_mut);
515 if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
// try_lock_until(): as above, with an absolute deadline.
524 template<typename _Clock, typename _Duration>
527 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
529 auto __id = this_thread::get_id();
530 _Can_lock __can_lock{this, __id};
531 unique_lock<mutex> __lk(_M_mut);
532 if (!_M_cv.wait_until(__lk, __atime, __can_lock))
// unlock(): must be called by the owner while locked; debug asserts check.
544 lock_guard<mutex> __lk(_M_mut);
545 __glibcxx_assert( _M_owner == this_thread::get_id() );
546 __glibcxx_assert( _M_count > 0 );
556#endif // _GLIBCXX_HAS_GTHREADS
558 /// @cond undocumented
561 // Lock the last lockable, after all previous ones are locked.
// Base case: try to lock the final lockable; on success release ownership
// from the unique_lock so the caller keeps the mutex locked.
// NOTE(review): return statements and closing braces are elided here.
562 template<typename _Lockable>
564 __try_lock_impl(_Lockable& __l)
566 if (unique_lock<_Lockable> __lock{__l, try_to_lock})
575 // Lock each lockable in turn.
576 // Use iteration if all lockables are the same type, recursion otherwise.
577 template<typename _L0, typename... _Lockables>
579 __try_lock_impl(_L0& __l0, _Lockables&... __lockables)
581#if __cplusplus >= 201703L
// Homogeneous case: deferred unique_locks in an array, locked in order.
582 if constexpr ((is_same_v<_L0, _Lockables> && ...))
584 constexpr int _Np = 1 + sizeof...(_Lockables);
585 unique_lock<_L0> __locks[_Np] = {
586 {__l0, defer_lock}, {__lockables, defer_lock}...
588 for (int __i = 0; __i < _Np; ++__i)
590 if (!__locks[__i].try_lock())
// On failure, unlock everything acquired so far and report the index.
592 const int __failed = __i;
594 __locks[__i].unlock();
// All locked: release ownership so the caller keeps the locks.
598 for (auto& __l : __locks)
// Heterogeneous case: lock the first, then recurse on the rest.
604 if (unique_lock<_L0> __lock{__l0, try_to_lock})
606 int __idx = __detail::__try_lock_impl(__lockables...);
618 } // namespace __detail
621 /** @brief Generic try_lock.
622 * @param __l1 Meets Lockable requirements (try_lock() may throw).
623 * @param __l2 Meets Lockable requirements (try_lock() may throw).
624 * @param __l3 Meets Lockable requirements (try_lock() may throw).
625 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
626 * a 0-based index corresponding to the argument that returned false.
627 * @post Either all arguments are locked, or none will be.
629 * Sequentially calls try_lock() on each argument.
// std::try_lock: sequentially try_lock each argument; -1 on full success,
// otherwise the 0-based index of the first failure (all locks released).
631 template<typename _L1, typename _L2, typename... _L3>
634 try_lock(_L1& __l1, _L2& __l2, _L3&... __l3)
636 return __detail::__try_lock_impl(__l1, __l2, __l3...);
639 /// @cond undocumented
642 // This function can recurse up to N levels deep, for N = 1+sizeof...(L1).
643 // On each recursion the lockables are rotated left one position,
644 // e.g. depth 0: l0, l1, l2; depth 1: l1, l2, l0; depth 2: l2, l0, l1.
645 // When a call to l_i.try_lock() fails it recurses/returns to depth=i
646 // so that l_i is the first argument, and then blocks until l_i is locked.
// Deadlock-avoiding lock: block on lockable __i, then try-lock the rest;
// on failure rotate so the failed lockable is blocked on next.
// NOTE(review): several lines (braces, the sched_yield call) are elided here.
647 template<typename _L0, typename... _L1>
649 __lock_impl(int& __i, int __depth, _L0& __l0, _L1&... __l1)
651 while (__i >= __depth)
655 int __failed = 1; // index that couldn't be locked
// Block until __l0 is acquired, then attempt the remainder.
657 unique_lock<_L0> __first(__l0);
658 __failed += __detail::__try_lock_impl(__l1...);
661 __i = -1; // finished
// Yield before retrying, where supported, to reduce livelock.
666#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
669 constexpr auto __n = 1 + sizeof...(_L1);
670 __i = (__depth + __failed) % __n;
672 else // rotate left until l_i is first.
673 __detail::__lock_impl(__i, __depth + 1, __l1..., __l0);
677 } // namespace __detail
680 /** @brief Generic lock.
681 * @param __l1 Meets Lockable requirements (try_lock() may throw).
682 * @param __l2 Meets Lockable requirements (try_lock() may throw).
683 * @param __l3 Meets Lockable requirements (try_lock() may throw).
684 * @throw An exception thrown by an argument's lock() or try_lock() member.
685 * @post All arguments are locked.
687 * All arguments are locked via a sequence of calls to lock(), try_lock()
688 * and unlock(). If this function exits via an exception any locks that
689 * were obtained will be released.
// std::lock: acquire all lockables without deadlock.
// NOTE(review): some lines (braces, the __first/__i declarations) are elided
// in this extract.
691 template<typename _L1, typename _L2, typename... _L3>
693 lock(_L1& __l1, _L2& __l2, _L3&... __l3)
695#if __cplusplus >= 201703L
// Homogeneous case: block on one lock, try-lock the rest in rotation;
// on failure restart blocking on the lock that failed.
696 if constexpr (is_same_v<_L1, _L2> && (is_same_v<_L1, _L3> && ...))
698 constexpr int _Np = 2 + sizeof...(_L3);
699 unique_lock<_L1> __locks[] = {
700 {__l1, defer_lock}, {__l2, defer_lock}, {__l3, defer_lock}...
704 __locks[__first].lock();
705 for (int __j = 1; __j < _Np; ++__j)
707 const int __idx = (__first + __j) % _Np;
708 if (!__locks[__idx].try_lock())
// Back out every lock acquired this round, in reverse order.
710 for (int __k = __j; __k != 0; --__k)
711 __locks[(__first + __k - 1) % _Np].unlock();
716 } while (!__locks[__first].owns_lock());
// All locked: release ownership so the caller keeps the locks.
718 for (auto& __l : __locks)
// Heterogeneous (or pre-C++17) case: recursive rotation algorithm.
725 __detail::__lock_impl(__i, 0, __l1, __l2, __l3...);
729#if __cplusplus >= 201703L
730#define __cpp_lib_scoped_lock 201703L
731 /** @brief A scoped lock type for multiple lockable objects.
733 * A scoped_lock controls mutex ownership within a scope, releasing
734 * ownership in the destructor.
// Primary scoped_lock: owns N mutexes for the scope; locks them all
// deadlock-free via std::lock and unlocks in the destructor.
// NOTE(review): the class head, destructor signature and closing brace are
// elided in this extract.
739 template<typename... _MutexTypes>
743 explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
744 { std::lock(__m...); }
746 explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
747 : _M_devices(std::tie(__m...))
748 { } // calling thread owns mutex
// Destructor body: unlock every held mutex (fold over the tuple).
751 { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }
753 scoped_lock(const scoped_lock&) = delete;
754 scoped_lock& operator=(const scoped_lock&) = delete;
// References to the guarded mutexes; scoped_lock never owns their storage.
757 tuple<_MutexTypes&...> _M_devices;
// Zero-mutex specialization of scoped_lock: nothing to lock or unlock.
// NOTE(review): the "class scoped_lock<>" head line is elided in this extract.
764 explicit scoped_lock() = default;
765 explicit scoped_lock(adopt_lock_t) noexcept { }
766 ~scoped_lock() = default;
768 scoped_lock(const scoped_lock&) = delete;
769 scoped_lock& operator=(const scoped_lock&) = delete;
// Single-mutex specialization: calls lock()/unlock() directly and exposes
// the mutex_type alias, matching std::lock_guard semantics.
772 template<typename _Mutex>
773 class scoped_lock<_Mutex>
776 using mutex_type = _Mutex;
778 explicit scoped_lock(mutex_type& __m) : _M_device(__m)
779 { _M_device.lock(); }
781 explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
783 { } // calling thread owns mutex
// Destructor body: release the single guarded mutex.
786 { _M_device.unlock(); }
788 scoped_lock(const scoped_lock&) = delete;
789 scoped_lock& operator=(const scoped_lock&) = delete;
792 mutex_type& _M_device;
796#ifdef _GLIBCXX_HAS_GTHREADS
797 /// Flag type used by std::call_once
// std::once_flag for gthreads targets: wraps a __gthread_once_t.
// NOTE(review): the "class once_flag" head line is elided in this extract.
800 constexpr once_flag() noexcept = default;
802 /// Deleted copy constructor
803 once_flag(const once_flag&) = delete;
804 /// Deleted assignment operator
805 once_flag& operator=(const once_flag&) = delete;
808 // For gthreads targets a pthread_once_t is used with pthread_once, but
809 // for most targets this doesn't work correctly for exceptional executions.
810 __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;
// RAII helper (defined below) that publishes the callable for __once_proxy.
812 struct _Prepare_execution;
// call_once is a friend so it can access _M_once and _Prepare_execution.
814 template<typename _Callable, typename... _Args>
816 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
819 /// @cond undocumented
820# ifdef _GLIBCXX_HAVE_TLS
821 // If TLS is available use thread-local state for the type-erased callable
822 // that is being run by std::call_once in the current thread.
823 extern __thread void* __once_callable;
824 extern __thread void (*__once_call)();
826 // RAII type to set up state for pthread_once call.
// TLS variant: publish the callable through thread-local globals so the
// extern "C" __once_proxy trampoline can invoke it.
827 struct once_flag::_Prepare_execution
829 template<typename _Callable>
831 _Prepare_execution(_Callable& __c)
833 // Store address in thread-local pointer:
834 __once_callable = std::__addressof(__c);
835 // Trampoline function to invoke the closure via thread-local pointer:
836 __once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
// Reset state on scope exit so stale pointers are never reused.
839 ~_Prepare_execution()
841 // PR libstdc++/82481
842 __once_callable = nullptr;
843 __once_call = nullptr;
846 _Prepare_execution(const _Prepare_execution&) = delete;
847 _Prepare_execution& operator=(const _Prepare_execution&) = delete;
851 // Without TLS use a global std::mutex and store the callable in a
852 // global std::function.
853 extern function<void()> __once_functor;
856 __set_once_functor_lock_ptr(unique_lock<mutex>*);
861 // RAII type to set up state for pthread_once call.
// Non-TLS variant: publish the callable via a global std::function guarded
// by a global mutex held for the duration of the pthread_once call.
862 struct once_flag::_Prepare_execution
864 template<typename _Callable>
866 _Prepare_execution(_Callable& __c)
868 // Store the callable in the global std::function
869 __once_functor = __c;
870 __set_once_functor_lock_ptr(&_M_functor_lock);
// Clear the lock pointer on scope exit (lock itself released by member dtor).
873 ~_Prepare_execution()
876 __set_once_functor_lock_ptr(nullptr);
880 // XXX This deadlocks if used recursively (PR 97949)
881 unique_lock<mutex> _M_functor_lock{__get_once_mutex()};
883 _Prepare_execution(const _Prepare_execution&) = delete;
884 _Prepare_execution& operator=(const _Prepare_execution&) = delete;
889 // This function is passed to pthread_once by std::call_once.
890 // It runs __once_call() or __once_functor().
891 extern "C" void __once_proxy(void);
893 /// Invoke a callable and synchronize with other calls using the same flag
// std::call_once (gthreads): wrap the user callable in a closure, publish it
// via _Prepare_execution, and run it through __gthread_once/__once_proxy.
// Throws std::system_error if __gthread_once itself fails.
894 template<typename _Callable, typename... _Args>
896 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
898 // Closure type that runs the function
899 auto __callable = [&] {
900 std::__invoke(std::forward<_Callable>(__f),
901 std::forward<_Args>(__args)...);
904 once_flag::_Prepare_execution __exec(__callable);
906 // XXX pthread_once does not reset the flag if an exception is thrown.
907 if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
908 __throw_system_error(__e);
911#else // _GLIBCXX_HAS_GTHREADS
913 /// Flag type used by std::call_once
// std::once_flag for single-threaded targets: a plain int state machine.
// NOTE(review): the "class once_flag" head line and some signatures are
// elided in this extract.
916 constexpr once_flag() noexcept = default;
918 /// Deleted copy constructor
919 once_flag(const once_flag&) = delete;
920 /// Deleted assignment operator
921 once_flag& operator=(const once_flag&) = delete;
924 // There are two different std::once_flag interfaces, abstracting four
925 // different implementations.
926 // The single-threaded interface uses the _M_activate() and _M_finish(bool)
927 // functions, which start and finish an active execution respectively.
928 // See [thread.once.callonce] in C++11 for the definition of
929 // active/passive/returning/exceptional executions.
930 enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };
932 int _M_once = _Bits::_Init;
934 // Check to see if all executions will be passive now.
936 _M_passive() const noexcept;
938 // Attempts to begin an active execution.
941 // Must be called to complete an active execution.
942 // The argument is true if the active execution was a returning execution,
943 // false if it was an exceptional execution.
944 void _M_finish(bool __returning) noexcept;
946 // RAII helper to call _M_finish.
947 struct _Active_execution
949 explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }
951 ~_Active_execution() { _M_flag._M_finish(_M_returning); }
953 _Active_execution(const _Active_execution&) = delete;
954 _Active_execution& operator=(const _Active_execution&) = delete;
// Set to true only after the callable returns normally; on exception the
// destructor resets the flag to _Init so another call can retry.
957 bool _M_returning = false;
960 template<typename _Callable, typename... _Args>
962 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
965 // Inline definitions of std::once_flag members for single-threaded targets.
// All future executions are passive once the state is _Done.
968 once_flag::_M_passive() const noexcept
969 { return _M_once == _Bits::_Done; }
// Begin an active execution; throws EDEADLK if called re-entrantly while
// an execution is already active on this flag.
972 once_flag::_M_activate()
974 if (_M_once == _Bits::_Init) [[__likely__]]
976 _M_once = _Bits::_Active;
979 else if (_M_passive()) // Caller should have checked this already.
982 __throw_system_error(EDEADLK);
// Returning execution -> _Done (all later calls passive);
// exceptional execution -> back to _Init so the next call retries.
986 once_flag::_M_finish(bool __returning) noexcept
987 { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }
989 /// Invoke a callable and synchronize with other calls using the same flag
// std::call_once (single-threaded): skip if already done, otherwise run the
// callable under an _Active_execution guard so exceptions reset the flag.
990 template<typename _Callable, typename... _Args>
992 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
994 if (__once._M_passive())
996 else if (__once._M_activate())
998 once_flag::_Active_execution __exec(__once);
1000 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1001 // 2442. call_once() shouldn't DECAY_COPY()
1002 std::__invoke(std::forward<_Callable>(__f),
1003 std::forward<_Args>(__args)...);
1005 // __f(__args...) did not throw
1006 __exec._M_returning = true;
1009#endif // _GLIBCXX_HAS_GTHREADS
1011 /// @} group mutexes
1012_GLIBCXX_END_NAMESPACE_VERSION
1017#endif // _GLIBCXX_MUTEX