// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
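  // Illustrative note (not part of the original source): each of the
  // _GLIBCXX_GTHRW invocations above expands to a weak-reference
  // declaration plus a forwarding wrapper that is a no-op when the
  // program is single-threaded.  For example, per the macro definition,
  // _GLIBCXX_GTHRW(rwlock_rdlock) expands to roughly:
  //
  //   __gthrw(pthread_rwlock_rdlock);
  //   static inline int
  //   __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  //   {
  //     if (__gthread_active_p ())   // are threads actually in use?
  //       return __gthrw_(pthread_rwlock_rdlock) (__rwlock);
  //     else
  //       return 0;                  // single-threaded: succeed trivially
  //   }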
# ifndef PTHREAD_RWLOCK_INITIALIZER
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif

  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
        __throw_bad_alloc();
      else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
        __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority.  When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.
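    //
    // A concrete sketch of the encoding (added for illustration, assuming
    // a 32-bit unsigned int):
    //
    //   _S_write_entered == 1U << 31 == 0x80000000
    //   _S_max_readers   == ~_S_write_entered == 0x7fffffff
    //
    //   _M_state == 0x00000003 : no writer, three readers hold the lock.
    //   _M_state == 0x80000002 : a writer is queued and two readers still
    //   hold the lock; the writer blocks on gate2 until the reader count
    //   drops to zero, and no new reader can pass gate1 in the meantime.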

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable _M_gate2;
    // The write-entered flag and reader count.
    unsigned _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set.  _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held.  _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
        {
          _M_state = _S_write_entered;
          return true;
        }
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
        return false;
      if (_M_state < _S_max_readers)
        {
          ++_M_state;
          return true;
        }
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
        {
          // Wake the queued writer if there are no more readers.
          if (_M_readers() == 0)
            _M_gate2.notify_one();
          // No need to notify gate1 because we give priority to the queued
          // writer, and that writer will eventually notify gate1 after it
          // clears the write-entered flag.
        }
      else
        {
          // Wake any thread that was blocked on reader overflow.
          if (__prev == _S_max_readers)
            _M_gate1.notify_one();
        }
    }
  };
#endif

#if __cplusplus > 201402L
  /// The standard shared mutex type.
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
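
  // A minimal usage sketch (illustrative, not part of this header; the
  // names m, data, reader and writer are hypothetical): any number of
  // threads may hold the shared lock concurrently, while the exclusive
  // lock excludes both readers and other writers.
  //
  //   std::shared_mutex m;   // protects data
  //   int data = 0;
  //
  //   int reader()
  //   {
  //     std::shared_lock<std::shared_mutex> lk(m);   // calls lock_shared()
  //     return data;
  //   }                                              // unlock_shared()
  //
  //   void writer(int v)
  //   {
  //     std::lock_guard<std::shared_mutex> lk(m);    // calls lock()
  //     data = v;
  //   }                                              // unlock()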
#endif // C++17

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif

  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
    typedef chrono::system_clock __clock_t;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_until(__clock_t::now() + __rel_time);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        // DR 887 - Sync unknown clock to known clock.
        const typename _Clock::time_point __c_entry = _Clock::now();
        const __clock_t::time_point __s_entry = __clock_t::now();
        const auto __delta = __abs_time - __c_entry;
        const auto __s_atime = __s_entry + __delta;
        return try_lock_until(__s_atime);
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret;
        // Unlike for lock(), we are not allowed to throw an exception so if
        // the maximum number of read locks has been exceeded, or we would
        // deadlock, we just try to acquire the lock again (and will time out
        // eventually).
        // In cases where we would exceed the maximum number of read locks
        // throughout the whole time until the timeout, we will fail to
        // acquire the lock even if it would be logically free; however, this
        // is allowed by the standard, and we made a "strong effort"
        // (see C++14 30.4.1.4p26).
        // For cases where the implementation detects a deadlock we
        // intentionally block and timeout so that an early return isn't
        // mistaken for a spurious failure, which might help users realise
        // there is a deadlock.
        do
          __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
        while (__ret == EAGAIN || __ret == EDEADLK);
        if (__ret == ETIMEDOUT)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        // DR 887 - Sync unknown clock to known clock.
        const typename _Clock::time_point __c_entry = _Clock::now();
        const __clock_t::time_point __s_entry = __clock_t::now();
        const auto __delta = __abs_time - __c_entry;
        const auto __s_atime = __s_entry + __delta;
        return try_lock_shared_until(__s_atime);
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return !_M_write_entered(); }))
          {
            return false;
          }
        _M_state |= _S_write_entered;
        if (!_M_gate2.wait_until(__lk, __abs_time,
                                 [=]{ return _M_readers() == 0; }))
          {
            _M_state ^= _S_write_entered;
            // Wake all threads blocked while the write-entered flag was set.
            _M_gate1.notify_all();
            return false;
          }
        return true;
      }

    // Shared ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return _M_state < _S_max_readers; }))
          {
            return false;
          }
        ++_M_state;
        return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
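
  // A minimal usage sketch for the timed interface (illustrative, not
  // part of this header; read_with_timeout is a hypothetical name): try
  // to acquire the shared lock but give up after a deadline.
  //
  //   std::shared_timed_mutex m;
  //
  //   bool read_with_timeout()
  //   {
  //     using namespace std::chrono_literals;
  //     if (m.try_lock_shared_for(100ms))  // forwards to try_lock_shared_until
  //       {
  //         // ... read the shared data ...
  //         m.unlock_shared();
  //         return true;
  //       }
  //     return false;                      // timed out
  //   }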
#endif // _GLIBCXX_HAS_GTHREADS

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __abs_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rel_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
        if (_M_owns)
          _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
        shared_lock(std::move(__sl)).swap(*this);
        return *this;
      }

      void
      lock()
      {
        _M_lockable();
        _M_pm->lock_shared();
        _M_owns = true;
      }

      bool
      try_lock()
      {
        _M_lockable();
        return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
        }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        _M_pm->unlock_shared();
        _M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
        std::swap(_M_pm, __u._M_pm);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
        _M_owns = false;
        return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
        if (_M_pm == nullptr)
          __throw_system_error(int(errc::operation_not_permitted));
        if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool _M_owns;
    };

  /// Swap specialization for shared_lock
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX