3 // Copyright (C) 2003-2013 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 /** @file include/mutex
26 * This is a Standard C++ Library header.
29 #ifndef _GLIBCXX_MUTEX
30 #define _GLIBCXX_MUTEX 1
32 #pragma GCC system_header
34 #if __cplusplus < 201103L
35 # include <bits/c++0x_warning.h>
41 #include <type_traits>
43 #include <system_error>
44 #include <bits/functexcept.h>
45 #include <bits/gthr.h>
46 #include <bits/move.h> // for std::swap
48 #ifdef _GLIBCXX_USE_C99_STDINT_TR1
50 namespace std _GLIBCXX_VISIBILITY(default)
52 _GLIBCXX_BEGIN_NAMESPACE_VERSION
54 #ifdef _GLIBCXX_HAS_GTHREADS
55 // Common base class for std::mutex and std::timed_mutex
// NOTE(review): this extraction is missing lines (the embedded original
// line numbers jump 55 -> 59) — the "class __mutex_base" opener and its
// access specifiers are not visible here; do not compile this text as-is.
59 typedef __gthread_mutex_t __native_type;
61 #ifdef __GTHREAD_MUTEX_INIT
// A static initializer exists, so the native mutex is member-initialized
// and the default constructor can be constexpr (constant initialization).
62 __native_type _M_mutex = __GTHREAD_MUTEX_INIT;
64 constexpr __mutex_base() noexcept = default;
// Elided #else branch: no static initializer, so the mutex must be set
// up at runtime via the gthreads init function.
66 __native_type _M_mutex;
68 __mutex_base() noexcept
70 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
71 __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
// Destroy the native mutex; any error from destroy is ignored because
// the destructor is noexcept.
74 ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
// Non-copyable: duplicating a mutex identity would be meaningless.
77 __mutex_base(const __mutex_base&) = delete;
78 __mutex_base& operator=(const __mutex_base&) = delete;
81 // Common base class for std::recursive_mutex and std::timed_recursive_mutex
82 class __recursive_mutex_base
// NOTE(review): class body braces and access specifiers are elided in
// this extraction (embedded line numbers jump); structure is incomplete.
85 typedef __gthread_recursive_mutex_t __native_type;
// Non-copyable, same rationale as __mutex_base.
87 __recursive_mutex_base(const __recursive_mutex_base&) = delete;
88 __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;
90 #ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
// Static initializer available: member init suffices and the default
// constructor can stay defaulted.
91 __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;
93 __recursive_mutex_base() = default;
// Elided #else branch: runtime initialization via the init function.
95 __native_type _M_mutex;
97 __recursive_mutex_base()
99 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
100 __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
// Release the native recursive mutex on destruction.
103 ~__recursive_mutex_base()
104 { __gthread_recursive_mutex_destroy(&_M_mutex); }
109 * @defgroup mutexes Mutexes
110 * @ingroup concurrency
112 * Classes for mutex support.
// The standard non-recursive mutex; inherits the native handle and its
// init/destroy logic privately from __mutex_base.
117 class mutex : private __mutex_base
// NOTE(review): class braces, access specifiers and several member
// signatures are elided in this extraction (line numbers jump).
120 typedef __native_type* native_handle_type;
122 #ifdef __GTHREAD_MUTEX_INIT
// With a static initializer the defaulted ctor is constexpr via the base.
125 mutex() noexcept = default;
// Non-copyable, per the C++11 Mutex requirements.
128 mutex(const mutex&) = delete;
129 mutex& operator=(const mutex&) = delete;
// lock(): blocks until acquired; a nonzero gthreads error is reported by
// throwing std::system_error (the surrounding signature is elided).
134 int __e = __gthread_mutex_lock(&_M_mutex);
136 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
138 __throw_system_error(__e);
// try_lock(): non-blocking; gthreads returns 0 on success, hence the
// negation to yield true iff the lock was acquired.
144 // XXX EINVAL, EAGAIN, EBUSY
145 return !__gthread_mutex_trylock(&_M_mutex);
// unlock(): errors are deliberately ignored — unlock must not throw.
151 // XXX EINVAL, EAGAIN, EPERM
152 __gthread_mutex_unlock(&_M_mutex);
// native_handle(): exposes the underlying gthreads mutex object.
157 { return &_M_mutex; }
// Recursive mutex: may be re-locked by the owning thread; mirrors the
// structure of std::mutex using the recursive gthreads primitives.
161 class recursive_mutex : private __recursive_mutex_base
// NOTE(review): class braces and member signatures are elided here.
164 typedef __native_type* native_handle_type;
166 recursive_mutex() = default;
167 ~recursive_mutex() = default;
// Non-copyable.
169 recursive_mutex(const recursive_mutex&) = delete;
170 recursive_mutex& operator=(const recursive_mutex&) = delete;
// lock(): blocks; throws system_error on a nonzero gthreads error code.
175 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
177 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
179 __throw_system_error(__e);
// try_lock(): true iff acquired (gthreads returns 0 on success).
185 // XXX EINVAL, EAGAIN, EBUSY
186 return !__gthread_recursive_mutex_trylock(&_M_mutex);
// unlock(): error codes ignored; must not throw.
192 // XXX EINVAL, EAGAIN, EBUSY
193 __gthread_recursive_mutex_unlock(&_M_mutex);
// native_handle(): underlying recursive gthreads mutex.
198 { return &_M_mutex; }
// Timed mutexes are only available when gthreads provides timedlock.
201 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
203 class timed_mutex : private __mutex_base
// Prefer the monotonic clock for deadlines when the platform has one;
// fall back to high_resolution_clock otherwise.
205 #ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
206 typedef chrono::steady_clock __clock_t;
208 typedef chrono::high_resolution_clock __clock_t;
// NOTE(review): class braces, access specifiers and most member
// signatures are elided in this extraction (line numbers jump).
212 typedef __native_type* native_handle_type;
214 timed_mutex() = default;
215 ~timed_mutex() = default;
// Non-copyable.
217 timed_mutex(const timed_mutex&) = delete;
218 timed_mutex& operator=(const timed_mutex&) = delete;
// lock(): blocks; throws system_error on a nonzero error code.
223 int __e = __gthread_mutex_lock(&_M_mutex);
225 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
227 __throw_system_error(__e);
// try_lock(): non-blocking attempt; true iff acquired.
233 // XXX EINVAL, EAGAIN, EBUSY
234 return !__gthread_mutex_trylock(&_M_mutex);
// try_lock_for(): relative-timeout attempt, delegated to the helper.
237 template <class _Rep, class _Period>
239 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
240 { return _M_try_lock_for(__rtime); }
// try_lock_until(): absolute-deadline attempt, delegated to the helper.
242 template <class _Clock, class _Duration>
244 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
245 { return _M_try_lock_until(__atime); }
// unlock(): errors ignored; must not throw.
250 // XXX EINVAL, EAGAIN, EBUSY
251 __gthread_mutex_unlock(&_M_mutex);
256 { return &_M_mutex; }
// Convert a relative timeout to an absolute deadline on __clock_t.
259 template<typename _Rep, typename _Period>
261 _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
263 auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
// If the cast truncated (clock tick is coarser than _Period), the
// elided branch presumably rounds __rt up so we never wait too little
// — TODO confirm against the full source.
264 if (ratio_greater<__clock_t::period, _Period>())
267 return _M_try_lock_until(__clock_t::now() + __rt);
// Deadline already on __clock_t: split into whole seconds plus
// nanoseconds to build the POSIX-style timespec for gthreads.
270 template<typename _Duration>
272 _M_try_lock_until(const chrono::time_point<__clock_t,
275 chrono::time_point<__clock_t, chrono::seconds> __s =
276 chrono::time_point_cast<chrono::seconds>(__atime);
278 chrono::nanoseconds __ns =
279 chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
281 __gthread_time_t __ts = {
282 static_cast<std::time_t>(__s.time_since_epoch().count()),
283 static_cast<long>(__ns.count())
// timedlock returns 0 on success; negate to get the bool result.
286 return !__gthread_mutex_timedlock(native_handle(), &__ts);
// Deadline on a foreign clock: convert to a relative wait from now.
289 template<typename _Clock, typename _Duration>
291 _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
292 { return _M_try_lock_for(__atime - _Clock::now()); }
295 /// recursive_timed_mutex
// Recursive variant of timed_mutex; structure parallels timed_mutex.
296 class recursive_timed_mutex : private __recursive_mutex_base
// Monotonic clock preferred for deadlines when available.
298 #ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
299 typedef chrono::steady_clock __clock_t;
301 typedef chrono::high_resolution_clock __clock_t;
// NOTE(review): class braces and member signatures are elided here.
305 typedef __native_type* native_handle_type;
307 recursive_timed_mutex() = default;
308 ~recursive_timed_mutex() = default;
// Non-copyable.
310 recursive_timed_mutex(const recursive_timed_mutex&) = delete;
311 recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
// lock(): blocks; throws system_error on a nonzero error code.
316 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
318 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
320 __throw_system_error(__e);
// try_lock(): true iff acquired.
326 // XXX EINVAL, EAGAIN, EBUSY
327 return !__gthread_recursive_mutex_trylock(&_M_mutex);
// try_lock_for / try_lock_until delegate to the private helpers below.
330 template <class _Rep, class _Period>
332 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
333 { return _M_try_lock_for(__rtime); }
335 template <class _Clock, class _Duration>
337 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
338 { return _M_try_lock_until(__atime); }
// unlock(): errors ignored; must not throw.
343 // XXX EINVAL, EAGAIN, EBUSY
344 __gthread_recursive_mutex_unlock(&_M_mutex);
349 { return &_M_mutex; }
// Relative timeout -> absolute deadline on __clock_t (see timed_mutex).
352 template<typename _Rep, typename _Period>
354 _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
356 auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
// Elided branch presumably rounds __rt up after a truncating cast —
// TODO confirm against the full source.
357 if (ratio_greater<__clock_t::period, _Period>())
360 return _M_try_lock_until(__clock_t::now() + __rt);
// Build the gthreads timespec from seconds + nanoseconds.
363 template<typename _Duration>
365 _M_try_lock_until(const chrono::time_point<__clock_t,
368 chrono::time_point<__clock_t, chrono::seconds> __s =
369 chrono::time_point_cast<chrono::seconds>(__atime);
371 chrono::nanoseconds __ns =
372 chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
374 __gthread_time_t __ts = {
375 static_cast<std::time_t>(__s.time_since_epoch().count()),
376 static_cast<long>(__ns.count())
// NOTE(review): this calls the non-recursive __gthread_mutex_timedlock
// on the recursive handle — presumably the gthreads layer shares one
// timedlock entry point for both mutex kinds; verify against gthr.h.
379 return !__gthread_mutex_timedlock(native_handle(), &__ts);
// Foreign-clock deadline -> relative wait from now.
382 template<typename _Clock, typename _Duration>
384 _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
385 { return _M_try_lock_for(__atime - _Clock::now()); }
388 #endif // _GLIBCXX_HAS_GTHREADS
// Tag types used to select a locking policy in lock_guard/unique_lock
// constructors; dispatch happens at compile time via overload resolution.
390 /// Do not acquire ownership of the mutex.
391 struct defer_lock_t { };
393 /// Try to acquire ownership of the mutex without blocking.
394 struct try_to_lock_t { };
396 /// Assume the calling thread has already obtained mutex ownership
398 struct adopt_lock_t { };
// The constexpr tag objects callers actually pass (std::defer_lock etc.).
400 constexpr defer_lock_t defer_lock { };
401 constexpr try_to_lock_t try_to_lock { };
402 constexpr adopt_lock_t adopt_lock { };
404 /// @brief Scoped lock idiom.
405 // Acquire the mutex here with a constructor call, then release with
406 // the destructor call in accordance with RAII style.
407 template<typename _Mutex>
// NOTE(review): the "class lock_guard" opener, access specifiers and the
// destructor's signature line are elided in this extraction.
411 typedef _Mutex mutex_type;
// Locks the mutex on construction; holds it for the guard's lifetime.
413 explicit lock_guard(mutex_type& __m) : _M_device(__m)
414 { _M_device.lock(); }
// adopt_lock overload: takes over a mutex the caller already locked.
416 lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
417 { } // calling thread owns mutex
// Destructor body: always unlocks (signature line elided above).
420 { _M_device.unlock(); }
// Non-copyable: exactly one guard owns the lock.
422 lock_guard(const lock_guard&) = delete;
423 lock_guard& operator=(const lock_guard&) = delete;
// Reference to the guarded mutex; lock_guard never stores null.
426 mutex_type& _M_device;
// Movable mutex ownership wrapper: may or may not own the lock, tracked
// by _M_owns; supports deferred, try, adopt and timed acquisition.
430 template<typename _Mutex>
// NOTE(review): the "class unique_lock" opener and many member function
// signatures/bodies are elided in this extraction (line numbers jump).
434 typedef _Mutex mutex_type;
// Default: no mutex, no ownership.
436 unique_lock() noexcept
437 : _M_device(0), _M_owns(false)
// Plain construction: elided body presumably calls lock() and then sets
// _M_owns — TODO confirm (initializer starts with _M_owns false).
440 explicit unique_lock(mutex_type& __m)
441 : _M_device(&__m), _M_owns(false)
// defer_lock: associate without locking.
447 unique_lock(mutex_type& __m, defer_lock_t) noexcept
448 : _M_device(&__m), _M_owns(false)
// try_to_lock: ownership reflects the non-blocking attempt's outcome.
451 unique_lock(mutex_type& __m, try_to_lock_t)
452 : _M_device(&__m), _M_owns(_M_device->try_lock())
// adopt_lock: caller asserts it already holds the mutex.
455 unique_lock(mutex_type& __m, adopt_lock_t)
456 : _M_device(&__m), _M_owns(true)
458 // XXX calling thread owns mutex
// Timed constructors: ownership reflects the timed attempt's outcome.
461 template<typename _Clock, typename _Duration>
462 unique_lock(mutex_type& __m,
463 const chrono::time_point<_Clock, _Duration>& __atime)
464 : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
467 template<typename _Rep, typename _Period>
468 unique_lock(mutex_type& __m,
469 const chrono::duration<_Rep, _Period>& __rtime)
470 : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
// Non-copyable but movable: ownership transfers, never duplicates.
479 unique_lock(const unique_lock&) = delete;
480 unique_lock& operator=(const unique_lock&) = delete;
482 unique_lock(unique_lock&& __u) noexcept
483 : _M_device(__u._M_device), _M_owns(__u._M_owns)
// Move assignment via the move-construct-and-swap idiom; the elided
// lines presumably unlock any currently-owned mutex first.
489 unique_lock& operator=(unique_lock&& __u) noexcept
494 unique_lock(std::move(__u)).swap(*this);
// lock() error checks (body elided): no mutex -> operation_not_permitted,
// already owned -> resource_deadlock_would_occur.
506 __throw_system_error(int(errc::operation_not_permitted));
508 __throw_system_error(int(errc::resource_deadlock_would_occur));
// try_lock(): same precondition checks, then a non-blocking attempt.
520 __throw_system_error(int(errc::operation_not_permitted));
522 __throw_system_error(int(errc::resource_deadlock_would_occur));
525 _M_owns = _M_device->try_lock();
// try_lock_until(): same checks, then a deadline-bounded attempt.
530 template<typename _Clock, typename _Duration>
532 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
535 __throw_system_error(int(errc::operation_not_permitted));
537 __throw_system_error(int(errc::resource_deadlock_would_occur));
540 _M_owns = _M_device->try_lock_until(__atime);
// try_lock_for(): same checks, then a duration-bounded attempt.
545 template<typename _Rep, typename _Period>
547 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
550 __throw_system_error(int(errc::operation_not_permitted));
552 __throw_system_error(int(errc::resource_deadlock_would_occur));
555 _M_owns = _M_device->try_lock_for(__rtime);
// unlock() (elided signature): throws if not owned.
564 __throw_system_error(int(errc::operation_not_permitted));
// swap(): exchange both the mutex pointer and the ownership flag.
573 swap(unique_lock& __u) noexcept
575 std::swap(_M_device, __u._M_device);
576 std::swap(_M_owns, __u._M_owns);
// release() (elided signature): give up association without unlocking;
// returns the old mutex pointer.
582 mutex_type* __ret = _M_device;
// Observers: ownership flag, bool conversion, associated mutex.
589 owns_lock() const noexcept
592 explicit operator bool() const noexcept
593 { return owns_lock(); }
596 mutex() const noexcept
597 { return _M_device; }
// State: pointer may be null (default-constructed / released).
600 mutex_type* _M_device;
601 bool _M_owns; // XXX use atomic_bool
604 /// Partial specialization for unique_lock objects.
// Free swap overload so std::swap / ADL swap works on unique_lock;
// body elided in this extraction (presumably __x.swap(__y)).
605 template<typename _Mutex>
607 swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
// Recursive helper: unlocks tuple element _Idx then recurses downward;
// the <-1> specialization below terminates the recursion with a no-op.
// NOTE(review): the enclosing "struct __unlock_impl" primary-template
// opener is elided in this extraction.
613 template<typename... _Lock>
615 __do_unlock(tuple<_Lock&...>& __locks)
617 std::get<_Idx>(__locks).unlock();
618 __unlock_impl<_Idx - 1>::__do_unlock(__locks);
// Base case: index -1 means nothing left to unlock.
623 struct __unlock_impl<-1>
625 template<typename... _Lock>
627 __do_unlock(tuple<_Lock&...>&)
// Wrap a non-blocking acquisition attempt in a unique_lock so that the
// lock is released automatically if a later lock in the sequence fails.
// NOTE(review): the return-type line is elided in this extraction.
631 template<typename _Lock>
633 __try_to_lock(_Lock& __l)
634 { return unique_lock<_Lock>(__l, try_to_lock); }
// Compile-time recursion over a tuple of lockables: try each in order;
// on success of element _Idx, recurse to _Idx+1. _Continue selects the
// terminating specialization for the last element. The elided lines
// presumably record the failing index in __idx and release on failure.
636 template<int _Idx, bool _Continue = true>
637 struct __try_lock_impl
639 template<typename... _Lock>
641 __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
644 auto __lock = __try_to_lock(std::get<_Idx>(__locks));
645 if (__lock.owns_lock())
647 __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
648 __do_try_lock(__locks, __idx);
// Terminating specialization: last element, no further recursion.
656 struct __try_lock_impl<_Idx, false>
658 template<typename... _Lock>
660 __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
663 auto __lock = __try_to_lock(std::get<_Idx>(__locks));
664 if (__lock.owns_lock())
672 /** @brief Generic try_lock.
673 * @param __l1 Meets Mutex requirements (try_lock() may throw).
674 * @param __l2 Meets Mutex requirements (try_lock() may throw).
675 * @param __l3 Meets Mutex requirements (try_lock() may throw).
676 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
677 * a 0-based index corresponding to the argument that returned false.
678 * @post Either all arguments are locked, or none will be.
680 * Sequentially calls try_lock() on each argument.
// NOTE(review): the signature's return-type line and the __idx setup /
// return lines are elided in this extraction.
682 template<typename _Lock1, typename _Lock2, typename... _Lock3>
684 try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
// tie builds a tuple of lvalue references — no lockable is copied.
687 auto __locks = std::tie(__l1, __l2, __l3...);
689 { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
695 /** @brief Generic lock.
696 * @param __l1 Meets Mutex requirements (try_lock() may throw).
697 * @param __l2 Meets Mutex requirements (try_lock() may throw).
698 * @param __l3 Meets Mutex requirements (try_lock() may throw).
699 * @throw An exception thrown by an argument's lock() or try_lock() member.
700 * @post All arguments are locked.
702 * All arguments are locked via a sequence of calls to lock(), try_lock()
703 * and unlock(). If the call exits via an exception any locks that were
704 * obtained will be released.
// NOTE(review): the surrounding retry loop and the success/release logic
// are elided in this extraction (line numbers jump).
706 template<typename _L1, typename _L2, typename ..._L3>
708 lock(_L1& __l1, _L2& __l2, _L3&... __l3)
// Block on the first lockable; RAII guard releases it if a later
// try_lock fails or throws.
712 unique_lock<_L1> __first(__l1);
714 auto __locks = std::tie(__l2, __l3...);
715 __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
// once_flag and call_once require real thread support.
724 #ifdef _GLIBCXX_HAS_GTHREADS
// NOTE(review): the "struct once_flag" opener and access specifiers are
// elided in this extraction.
729 typedef __gthread_once_t __native_type;
// Constant-initialized native once-control; constexpr ctor keeps the
// flag usable as a static with no dynamic initialization.
730 __native_type _M_once = __GTHREAD_ONCE_INIT;
734 constexpr once_flag() noexcept = default;
736 /// Deleted copy constructor
737 once_flag(const once_flag&) = delete;
738 /// Deleted assignment operator
739 once_flag& operator=(const once_flag&) = delete;
// call_once is a friend so it can reach _M_once (declaration elided:
// only the template head and signature are visible).
741 template<typename _Callable, typename... _Args>
743 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
// Plumbing that lets the extern-"C" __once_proxy invoke an arbitrary
// callable: with TLS, a thread-local pointer + trampoline; without TLS,
// a mutex-protected std::function set up before calling gthread_once.
746 #ifdef _GLIBCXX_HAVE_TLS
747 extern __thread void* __once_callable;
748 extern __thread void (*__once_call)();
// Trampoline: casts the stashed pointer back to the callable and runs
// it (enclosing signature lines elided in this extraction).
750 template<typename _Callable>
754 (*(_Callable*)__once_callable)();
// Non-TLS path: a single shared functor guarded by the once mutex.
757 extern function<void()> __once_functor;
760 __set_once_functor_lock_ptr(unique_lock<mutex>*);
// C-linkage entry point handed to __gthread_once.
766 extern "C" void __once_proxy(void);
// call_once: run __f(__args...) exactly once across all threads racing
// on the same once_flag. NOTE(review): several lines (return type, #else,
// braces, the error check around __e) are elided in this extraction.
769 template<typename _Callable, typename... _Args>
771 call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
773 #ifdef _GLIBCXX_HAVE_TLS
// TLS path: stash the bound callable's address and a trampoline in
// thread-local slots; no allocation beyond the local bound object.
774 auto __bound_functor = std::__bind_simple(std::forward<_Callable>(__f),
775 std::forward<_Args>(__args)...);
776 __once_callable = &__bound_functor;
777 __once_call = &__once_call_impl<decltype(__bound_functor)>;
// Non-TLS path: publish the callable through a shared std::function,
// serialized by the once mutex held in __functor_lock.
779 unique_lock<mutex> __functor_lock(__get_once_mutex());
780 auto __callable = std::__bind_simple(std::forward<_Callable>(__f),
781 std::forward<_Args>(__args)...);
782 __once_functor = [&]() { __callable(); };
783 __set_once_functor_lock_ptr(&__functor_lock);
// Hand control to gthreads; __once_proxy invokes the stashed callable
// in exactly one thread.
786 int __e = __gthread_once(&(__once._M_once), &__once_proxy);
788 #ifndef _GLIBCXX_HAVE_TLS
// Clear the lock pointer (elided guard presumably checks __functor_lock).
790 __set_once_functor_lock_ptr(0);
// Nonzero gthreads result is reported as std::system_error.
794 __throw_system_error(__e);
796 #endif // _GLIBCXX_HAS_GTHREADS
799 _GLIBCXX_END_NAMESPACE_VERSION
801 #endif // _GLIBCXX_USE_C99_STDINT_TR1
805 #endif // _GLIBCXX_MUTEX