3 // Copyright (C) 2003-2020 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 /** @file include/mutex
26 * This is a Standard C++ Library header.
29 #ifndef _GLIBCXX_MUTEX
30 #define _GLIBCXX_MUTEX 1
32 #pragma GCC system_header
34 #if __cplusplus < 201103L
35 # include <bits/c++0x_warning.h>
41 #include <type_traits>
42 #include <system_error>
43 #include <bits/std_mutex.h>
44 #include <bits/unique_lock.h>
45 #if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
46 # include <condition_variable>
49 #ifndef _GLIBCXX_HAVE_TLS
50 # include <bits/std_function.h>
53 namespace std _GLIBCXX_VISIBILITY(default)
55 _GLIBCXX_BEGIN_NAMESPACE_VERSION
62 #ifdef _GLIBCXX_HAS_GTHREADS
64 // Common base class for std::recursive_mutex and std::recursive_timed_mutex
65 class __recursive_mutex_base
// NOTE(review): this dump elides interleaved lines (braces, access
// specifiers, #else/#endif) — structure below is partial.
// Underlying gthreads recursive mutex type.
68 typedef __gthread_recursive_mutex_t __native_type;
// Mutexes have identity: neither copyable nor assignable.
70 __recursive_mutex_base(const __recursive_mutex_base&) = delete;
71 __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;
73 #ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
// Static initializer macro is available: initialize the native mutex
// in-class and keep the default constructor trivial.
74 __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;
76 __recursive_mutex_base() = default;
// Otherwise initialize at runtime via the gthreads init function.
78 __native_type _M_mutex;
80 __recursive_mutex_base()
82 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
83 __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
// Destroy the native mutex; the return value of destroy is ignored.
86 ~__recursive_mutex_base()
87 { __gthread_recursive_mutex_destroy(&_M_mutex); }
91 /// The standard recursive mutex type.
92 class recursive_mutex : private __recursive_mutex_base
// native_handle() exposes a pointer to the underlying gthreads mutex.
95 typedef __native_type* native_handle_type;
97 recursive_mutex() = default;
98 ~recursive_mutex() = default;
100 recursive_mutex(const recursive_mutex&) = delete;
101 recursive_mutex& operator=(const recursive_mutex&) = delete;
// lock(): block until the mutex is acquired; on a nonzero error code
// from the gthreads layer, throw std::system_error.
106 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
108 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
110 __throw_system_error(__e);
// try_lock(): non-blocking attempt; the gthreads call returns 0 on
// success, so negate to yield true when the lock was acquired.
116 // XXX EINVAL, EAGAIN, EBUSY
117 return !__gthread_recursive_mutex_trylock(&_M_mutex);
// unlock(): release one level of ownership; errors are not checked
// (unlocking an owned mutex is expected to succeed).
123 // XXX EINVAL, EAGAIN, EBUSY
124 __gthread_recursive_mutex_unlock(&_M_mutex);
128 native_handle() noexcept
129 { return &_M_mutex; }
132 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
// CRTP mixin providing the timed-lock plumbing shared by timed_mutex
// and recursive_timed_mutex. _Derived must supply _M_timedlock (and
// _M_clocklock when pthread_mutex_clocklock is available).
133 template<typename _Derived>
134 class __timed_mutex_impl
// Convert a relative timeout into an absolute deadline on a reference
// clock, then defer to the _M_try_lock_until overloads below.
137 template<typename _Rep, typename _Period>
139 _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
// With pthread_mutex_clocklock, the deadline can be measured on the
// steady clock (immune to system clock adjustments); otherwise the
// system clock must be used to match __gthread_mutex_timedlock.
141 #if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
142 using __clock = chrono::steady_clock;
144 using __clock = chrono::system_clock;
// If the reference clock's tick is coarser than _Period the cast may
// have truncated; the (elided) branch body presumably rounds __rt up
// so the wait is never shorter than requested — TODO confirm.
147 auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
148 if (ratio_greater<__clock::period, _Period>())
150 return _M_try_lock_until(__clock::now() + __rt);
// system_clock deadline: split into whole seconds + nanoseconds,
// build a __gthread_time_t, and call the derived class's timedlock.
153 template<typename _Duration>
155 _M_try_lock_until(const chrono::time_point<chrono::system_clock,
158 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
159 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
161 __gthread_time_t __ts = {
162 static_cast<std::time_t>(__s.time_since_epoch().count()),
163 static_cast<long>(__ns.count())
166 return static_cast<_Derived*>(this)->_M_timedlock(__ts);
// steady_clock deadline: same timespec conversion, but lock against
// CLOCK_MONOTONIC via the derived class's _M_clocklock.
169 #ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
170 template<typename _Duration>
172 _M_try_lock_until(const chrono::time_point<chrono::steady_clock,
175 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
176 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
178 __gthread_time_t __ts = {
179 static_cast<std::time_t>(__s.time_since_epoch().count()),
180 static_cast<long>(__ns.count())
183 return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
// Generic user-clock deadline: retry with relative waits until the
// user clock itself reports the deadline has passed.
188 template<typename _Clock, typename _Duration>
190 _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
// C++20 requires _Clock to satisfy the Cpp17Clock requirements.
192 #if __cplusplus > 201703L
193 static_assert(chrono::is_clock_v<_Clock>);
195 // The user-supplied clock may not tick at the same rate as
196 // steady_clock, so we must loop in order to guarantee that
197 // the timeout has expired before returning false.
198 auto __now = _Clock::now();
200 auto __rtime = __atime - __now;
201 if (_M_try_lock_for(__rtime))
203 __now = _Clock::now();
204 } while (__atime > __now);
209 /// The standard timed mutex type.
// Uses the native __gthread_mutex_timedlock path; the CRTP base
// provides try_lock_for/try_lock_until deadline conversion.
211 : private __mutex_base, public __timed_mutex_impl<timed_mutex>
214 typedef __native_type* native_handle_type;
216 timed_mutex() = default;
217 ~timed_mutex() = default;
219 timed_mutex(const timed_mutex&) = delete;
220 timed_mutex& operator=(const timed_mutex&) = delete;
// lock(): block until acquired; throw system_error on failure.
225 int __e = __gthread_mutex_lock(&_M_mutex);
227 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
229 __throw_system_error(__e);
// try_lock(): gthreads trylock returns 0 on success, hence the !.
235 // XXX EINVAL, EAGAIN, EBUSY
236 return !__gthread_mutex_trylock(&_M_mutex);
// Timed variants forward to the CRTP base implementation.
239 template <class _Rep, class _Period>
241 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
242 { return _M_try_lock_for(__rtime); }
244 template <class _Clock, class _Duration>
246 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
247 { return _M_try_lock_until(__atime); }
// unlock(): release; return value of the gthreads call not checked.
252 // XXX EINVAL, EAGAIN, EBUSY
253 __gthread_mutex_unlock(&_M_mutex);
257 native_handle() noexcept
258 { return &_M_mutex; }
// Hooks called by __timed_mutex_impl: both return true on success
// (the underlying calls return 0 on success).
261 friend class __timed_mutex_impl<timed_mutex>;
264 _M_timedlock(const __gthread_time_t& __ts)
265 { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }
267 #if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
269 _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
270 { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
274 /// recursive_timed_mutex
// Same structure as timed_mutex above, but on the recursive native
// mutex type so the owning thread may re-lock.
275 class recursive_timed_mutex
276 : private __recursive_mutex_base,
277 public __timed_mutex_impl<recursive_timed_mutex>
280 typedef __native_type* native_handle_type;
282 recursive_timed_mutex() = default;
283 ~recursive_timed_mutex() = default;
285 recursive_timed_mutex(const recursive_timed_mutex&) = delete;
286 recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
// lock(): block until acquired; throw system_error on failure.
291 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
293 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
295 __throw_system_error(__e);
// try_lock(): 0 from trylock means success, hence the negation.
301 // XXX EINVAL, EAGAIN, EBUSY
302 return !__gthread_recursive_mutex_trylock(&_M_mutex);
// Timed variants forward to the CRTP base implementation.
305 template <class _Rep, class _Period>
307 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
308 { return _M_try_lock_for(__rtime); }
310 template <class _Clock, class _Duration>
312 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
313 { return _M_try_lock_until(__atime); }
// unlock(): release one level of ownership; result not checked.
318 // XXX EINVAL, EAGAIN, EBUSY
319 __gthread_recursive_mutex_unlock(&_M_mutex);
323 native_handle() noexcept
324 { return &_M_mutex; }
// Hooks for __timed_mutex_impl; true on success (call returns 0).
327 friend class __timed_mutex_impl<recursive_timed_mutex>;
330 _M_timedlock(const __gthread_time_t& __ts)
331 { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }
333 #ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
335 _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
336 { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
340 #else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
// Fallback timed_mutex for targets without a native timedlock:
// emulated with a plain mutex (_M_mut, declaration elided in this
// dump), a condition_variable and a _M_locked flag.
// NOTE(review): the class head and several structural lines are
// missing from this dump — TODO confirm against the full header.
346 condition_variable _M_cv;
347 bool _M_locked = false;
351 timed_mutex() = default;
// Destroying a locked mutex is undefined; assert it is unlocked.
352 ~timed_mutex() { __glibcxx_assert( !_M_locked ); }
354 timed_mutex(const timed_mutex&) = delete;
355 timed_mutex& operator=(const timed_mutex&) = delete;
// lock(): wait on the condition variable until _M_locked is false.
360 unique_lock<mutex> __lk(_M_mut);
361 _M_cv.wait(__lk, [&]{ return !_M_locked; });
// try_lock(): inspect/flip _M_locked under the internal mutex.
368 lock_guard<mutex> __lk(_M_mut);
// Timed variants: a failed (timed-out) condition wait reports false.
375 template<typename _Rep, typename _Period>
377 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
379 unique_lock<mutex> __lk(_M_mut);
380 if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
386 template<typename _Clock, typename _Duration>
388 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
390 unique_lock<mutex> __lk(_M_mut);
391 if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
// unlock(): caller must hold the lock (asserted), then clear the flag
// and wake a waiter (wake-up line elided in this dump).
400 lock_guard<mutex> __lk(_M_mut);
401 __glibcxx_assert( _M_locked );
407 /// recursive_timed_mutex
// Fallback emulation: tracks the owning thread (_M_owner, declaration
// elided in this dump) and a recursion depth _M_count, guarded by an
// internal mutex (_M_mut, also elided) and a condition_variable.
408 class recursive_timed_mutex
411 condition_variable _M_cv;
413 unsigned _M_count = 0;
415 // Predicate type that tests whether the current thread can lock a mutex.
418 // Returns true if the mutex is unlocked or is locked by _M_caller.
420 operator()() const noexcept
421 { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }
423 const recursive_timed_mutex* _M_mx;
424 thread::id _M_caller;
429 recursive_timed_mutex() = default;
// Destroying a locked mutex is undefined; assert depth is zero.
430 ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }
432 recursive_timed_mutex(const recursive_timed_mutex&) = delete;
433 recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
// lock(): wait until this thread may take the mutex, then (in elided
// lines) record ownership and bump the count; EAGAIN if the recursion
// count would overflow.
438 auto __id = this_thread::get_id();
439 _Can_lock __can_lock{this, __id};
440 unique_lock<mutex> __lk(_M_mut);
441 _M_cv.wait(__lk, __can_lock);
443 __throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
// try_lock(): same check without blocking.
451 auto __id = this_thread::get_id();
452 _Can_lock __can_lock{this, __id};
453 lock_guard<mutex> __lk(_M_mut);
// Timed variants: a timed-out condition wait reports false.
463 template<typename _Rep, typename _Period>
465 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
467 auto __id = this_thread::get_id();
468 _Can_lock __can_lock{this, __id};
469 unique_lock<mutex> __lk(_M_mut);
470 if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
479 template<typename _Clock, typename _Duration>
481 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
483 auto __id = this_thread::get_id();
484 _Can_lock __can_lock{this, __id};
485 unique_lock<mutex> __lk(_M_mut);
486 if (!_M_cv.wait_until(__lk, __atime, __can_lock))
// unlock(): asserts the caller owns the mutex with positive depth;
// the decrement/notify lines are elided in this dump.
498 lock_guard<mutex> __lk(_M_mut);
499 __glibcxx_assert( _M_owner == this_thread::get_id() );
500 __glibcxx_assert( _M_count > 0 );
510 #endif // _GLIBCXX_HAS_GTHREADS
512 /// @cond undocumented
// Attempt to lock __l without blocking; success is reported via
// owns_lock() on the returned unique_lock.
513 template<typename _Lock>
514 inline unique_lock<_Lock>
515 __try_to_lock(_Lock& __l)
516 { return unique_lock<_Lock>{__l, try_to_lock}; }
// Recursive helper for std::try_lock: tries to lock tuple element
// _Idx, then recurses to _Idx + 1. _Continue=false selects the
// base-case specialization below for the final element.
518 template<int _Idx, bool _Continue = true>
519 struct __try_lock_impl
521 template<typename... _Lock>
523 __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
// On success, keep recursing; the unique_lock is released (elided
// here, presumably via __lock.release()) only once all succeed.
526 auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
527 if (__lock.owns_lock())
// Stop recursing when _Idx + 1 is the last element.
529 constexpr bool __cont = _Idx + 2 < sizeof...(_Lock);
530 using __try_locker = __try_lock_impl<_Idx + 1, __cont>;
531 __try_locker::__do_try_lock(__locks, __idx);
// Base case: last lock in the pack — try it, no further recursion.
// (The leading template<int _Idx> line is elided in this dump.)
539 struct __try_lock_impl<_Idx, false>
541 template<typename... _Lock>
543 __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
546 auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
547 if (__lock.owns_lock())
556 /** @brief Generic try_lock.
557 * @param __l1 Meets Lockable requirements (try_lock() may throw).
558 * @param __l2 Meets Lockable requirements (try_lock() may throw).
559 * @param __l3 Meets Lockable requirements (try_lock() may throw).
560 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
561 * a 0-based index corresponding to the argument that returned false.
562 * @post Either all arguments are locked, or none will be.
564 * Sequentially calls try_lock() on each argument.
566 template<typename _Lock1, typename _Lock2, typename... _Lock3>
568 try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
// Gather the locks as a tuple of references and let the recursive
// helper record the failing index (the -1 init / return are elided).
571 auto __locks = std::tie(__l1, __l2, __l3...);
572 __try_lock_impl<0>::__do_try_lock(__locks, __idx);
576 /** @brief Generic lock.
577 * @param __l1 Meets Lockable requirements (try_lock() may throw).
578 * @param __l2 Meets Lockable requirements (try_lock() may throw).
579 * @param __l3 Meets Lockable requirements (try_lock() may throw).
580 * @throw An exception thrown by an argument's lock() or try_lock() member.
581 * @post All arguments are locked.
583 * All arguments are locked via a sequence of calls to lock(), try_lock()
584 * and unlock(). If the call exits via an exception any locks that were
585 * obtained will be released.
587 template<typename _L1, typename _L2, typename... _L3>
589 lock(_L1& __l1, _L2& __l2, _L3&... __l3)
// Block on the first lock, then try_lock the rest; the unique_lock
// on __l1 releases it automatically if a later try_lock fails
// (the surrounding retry loop is elided in this dump).
593 using __try_locker = __try_lock_impl<0, sizeof...(_L3) != 0>;
594 unique_lock<_L1> __first(__l1);
596 auto __locks = std::tie(__l2, __l3...);
597 __try_locker::__do_try_lock(__locks, __idx);
606 #if __cplusplus >= 201703L
607 #define __cpp_lib_scoped_lock 201703
608 /** @brief A scoped lock type for multiple lockable objects.
610 * A scoped_lock controls mutex ownership within a scope, releasing
611 * ownership in the destructor.
613 template<typename... _MutexTypes>
// Acquire all mutexes deadlock-free via std::lock; references are
// retained in _M_devices for release in the destructor.
617 explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
618 { std::lock(__m...); }
// Adopt already-held mutexes without locking.
620 explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
621 : _M_devices(std::tie(__m...))
622 { } // calling thread owns mutex
// Destructor: unlock every mutex via a fold expression.
625 { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }
627 scoped_lock(const scoped_lock&) = delete;
628 scoped_lock& operator=(const scoped_lock&) = delete;
631 tuple<_MutexTypes&...> _M_devices;
// Zero-mutex specialization: nothing to lock or unlock. (The
// template<> class scoped_lock<> head is elided in this dump.)
638 explicit scoped_lock() = default;
639 explicit scoped_lock(adopt_lock_t) noexcept { }
640 ~scoped_lock() = default;
642 scoped_lock(const scoped_lock&) = delete;
643 scoped_lock& operator=(const scoped_lock&) = delete;
// Single-mutex specialization: stores a plain reference and provides
// the mutex_type member typedef (the variadic primary has none).
646 template<typename _Mutex>
647 class scoped_lock<_Mutex>
650 using mutex_type = _Mutex;
// Lock on construction...
652 explicit scoped_lock(mutex_type& __m) : _M_device(__m)
653 { _M_device.lock(); }
// ...or adopt a mutex the calling thread already owns.
655 explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
657 { } // calling thread owns mutex
// Destructor releases the mutex.
660 { _M_device.unlock(); }
662 scoped_lock(const scoped_lock&) = delete;
663 scoped_lock& operator=(const scoped_lock&) = delete;
666 mutex_type& _M_device;
670 #ifdef _GLIBCXX_HAS_GTHREADS
671 /// Flag type used by std::call_once
// (The class once_flag head is elided in this dump.) Wraps a
// statically-initialized __gthread_once_t; call_once is a friend-like
// accessor declared below.
675 typedef __gthread_once_t __native_type;
676 __native_type _M_once = __GTHREAD_ONCE_INIT;
// Constexpr default constructor permits constant initialization.
680 constexpr once_flag() noexcept = default;
682 /// Deleted copy constructor
683 once_flag(const once_flag&) = delete;
684 /// Deleted assignment operator
685 once_flag& operator=(const once_flag&) = delete;
// call_once needs access to _M_once.
687 template<typename _Callable, typename... _Args>
689 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
692 /// @cond undocumented
// Machinery used by call_once to pass the callable to __once_proxy.
// With TLS: thread-local pointers to the pending callable and its
// type-erased invoker. Without TLS: a global std::function protected
// by a lock (accessor declarations partly elided in this dump).
693 #ifdef _GLIBCXX_HAVE_TLS
694 extern __thread void* __once_callable;
695 extern __thread void (*__once_call)();
697 extern function<void()> __once_functor;
700 __set_once_functor_lock_ptr(unique_lock<mutex>*);
// C-linkage trampoline handed to __gthread_once.
706 extern "C" void __once_proxy(void);
709 /// Invoke a callable and synchronize with other calls using the same flag
710 template<typename _Callable, typename... _Args>
712 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
714 // _GLIBCXX_RESOLVE_LIB_DEFECTS
715 // 2442. call_once() shouldn't DECAY_COPY()
// Wrap the invocation in a lambda capturing everything by reference
// (per LWG 2442, arguments must not be decay-copied).
716 auto __callable = [&] {
717 std::__invoke(std::forward<_Callable>(__f),
718 std::forward<_Args>(__args)...);
// TLS path: publish the callable's address and a type-erased invoker
// through thread-local globals for __once_proxy to pick up.
720 #ifdef _GLIBCXX_HAVE_TLS
721 __once_callable = std::__addressof(__callable); // NOLINT: PR 82481
722 __once_call = []{ (*(decltype(__callable)*)__once_callable)(); };
// Non-TLS path: store the callable in the global __once_functor under
// a lock that __once_proxy will release.
724 unique_lock<mutex> __functor_lock(__get_once_mutex());
725 __once_functor = __callable;
726 __set_once_functor_lock_ptr(&__functor_lock);
// Run the proxy exactly once across all threads using this flag.
729 int __e = __gthread_once(&__once._M_once, &__once_proxy);
// Non-TLS: detach the lock pointer again after the call.
731 #ifndef _GLIBCXX_HAVE_TLS
733 __set_once_functor_lock_ptr(0);
// Report any gthread_once failure as std::system_error.
737 __throw_system_error(__e);
742 _GLIBCXX_END_NAMESPACE_VERSION
747 #endif // _GLIBCXX_MUTEX