libstdc++
atomic_2.h
// -*- C++ -*- header.

// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst)
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    void
    clear(memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }

    void
    clear(memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };
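
  // Usage sketch (illustrative; assumes this header is reached through
  // <atomic>, which maps std::atomic_flag onto this type; the names
  // below are placeholders).  The flag supports a simple spin lock:
  //
  //   std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
  //
  //   void
  //   lock()
  //   {
  //     // Spin until the previous value was false, i.e. we set the flag.
  //     while (lock_flag.test_and_set(std::memory_order_acquire))
  //       { }
  //   }
  //
  //   void
  //   unlock()
  //   { lock_flag.clear(std::memory_order_release); }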


  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  //   atomic_bool     bool
  //   atomic_char     char
  //   atomic_schar    signed char
  //   atomic_uchar    unsigned char
  //   atomic_short    short
  //   atomic_ushort   unsigned short
  //   atomic_int      int
  //   atomic_uint     unsigned int
  //   atomic_long     long
  //   atomic_ulong    unsigned long
  //   atomic_llong    long long
  //   atomic_ullong   unsigned long long
  //   atomic_char16_t char16_t
  //   atomic_char32_t char32_t
  //   atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __int_type;

      __int_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i): _M_i (__i) { }

      operator __int_type() const
      { return load(); }

      operator __int_type() const volatile
      { return load(); }

      __int_type
      operator=(__int_type __i)
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int)
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile
      { return fetch_add(1); }

      __int_type
      operator--(int)
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __int_type
      operator++()
      { return __sync_add_and_fetch(&_M_i, 1); }

      __int_type
      operator++() volatile
      { return __sync_add_and_fetch(&_M_i, 1); }

      __int_type
      operator--()
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __int_type
      operator--() volatile
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __int_type
      operator+=(__int_type __i)
      { return __sync_add_and_fetch(&_M_i, __i); }

      __int_type
      operator+=(__int_type __i) volatile
      { return __sync_add_and_fetch(&_M_i, __i); }

      __int_type
      operator-=(__int_type __i)
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __int_type
      operator-=(__int_type __i) volatile
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __int_type
      operator&=(__int_type __i)
      { return __sync_and_and_fetch(&_M_i, __i); }

      __int_type
      operator&=(__int_type __i) volatile
      { return __sync_and_and_fetch(&_M_i, __i); }

      __int_type
      operator|=(__int_type __i)
      { return __sync_or_and_fetch(&_M_i, __i); }

      __int_type
      operator|=(__int_type __i) volatile
      { return __sync_or_and_fetch(&_M_i, __i); }

      __int_type
      operator^=(__int_type __i)
      { return __sync_xor_and_fetch(&_M_i, __i); }

      __int_type
      operator^=(__int_type __i) volatile
      { return __sync_xor_and_fetch(&_M_i, __i); }

      bool
      is_lock_free() const
      { return true; }

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __int_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __int_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }


      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2)
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2)
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __int_type __i1o = __i1;
        __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __int_type __i1o = __i1;
        __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __int_type
      fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_add(&_M_i, __i); }

      __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_i, __i); }

      __int_type
      fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __int_type
      fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_and(&_M_i, __i); }

      __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_and(&_M_i, __i); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_or(&_M_i, __i); }

      __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_or(&_M_i, __i); }

      __int_type
      fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_xor(&_M_i, __i); }

      __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };
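
  // Usage sketch (illustrative; assumes <atomic> exposes these members
  // through std::atomic<int>; the names below are placeholders):
  //
  //   std::atomic<int> counter(0);
  //   counter.fetch_add(5);                       // counter now holds 5
  //   int expected = 5;
  //   if (counter.compare_exchange_strong(expected, 7,
  //                                       std::memory_order_acq_rel,
  //                                       std::memory_order_acquire))
  //     { /* counter now holds 7 */ }
  //   else
  //     { /* expected was updated to the value actually observed */ }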


  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p): _M_p (__p) { }

      operator __pointer_type() const
      { return load(); }

      operator __pointer_type() const volatile
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p)
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int)
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile
      { return fetch_add(1); }

      __pointer_type
      operator--(int)
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __pointer_type
      operator++()
      { return fetch_add(1) + 1; }

      __pointer_type
      operator++() volatile
      { return fetch_add(1) + 1; }

      __pointer_type
      operator--()
      { return fetch_sub(1) - 1; }

      __pointer_type
      operator--() volatile
      { return fetch_sub(1) - 1; }

      __pointer_type
      operator+=(ptrdiff_t __d)
      { return fetch_add(__d) + __d; }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile
      { return fetch_add(__d) + __d; }

      __pointer_type
      operator-=(ptrdiff_t __d)
      { return fetch_sub(__d) - __d; }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile
      { return fetch_sub(__d) - __d; }

      bool
      is_lock_free() const
      { return true; }

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_p = __p;
        else
          {
            // write_mem_barrier();
            _M_p = __p;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_p = __p;
        else
          {
            // write_mem_barrier();
            _M_p = __p;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __pointer_type
      load(memory_order __m = memory_order_seq_cst) const
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __pointer_type __ret = _M_p;
        __sync_synchronize();
        return __ret;
      }

      __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __pointer_type __ret = _M_p;
        __sync_synchronize();
        return __ret;
      }

      __pointer_type
      exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_p, __p);
      }


      __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_p, __p);
      }

      bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2)
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __pointer_type __p1o = __p1;
        __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);

        // Assume extra stores (of same value) allowed in true case.
        __p1 = __p1n;
        return __p1o == __p1n;
      }

      bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __pointer_type __p1o = __p1;
        __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);

        // Assume extra stores (of same value) allowed in true case.
        __p1 = __p1n;
        return __p1o == __p1n;
      }

      __pointer_type
      fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_add(&_M_p, __d); }

      __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_p, __d); }

      __pointer_type
      fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_sub(&_M_p, __d); }

      __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_p, __d); }
    };
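
  // Usage sketch (illustrative; assumes <atomic> layers std::atomic<T*>
  // on this partial specialization; the names below are placeholders):
  //
  //   int a = 0, b = 0;
  //   std::atomic<int*> ptr(&a);
  //   int* prev = ptr.exchange(&b);               // prev == &a
  //   int* expected = &b;
  //   if (ptr.compare_exchange_strong(expected, &a,
  //                                   std::memory_order_acq_rel,
  //                                   std::memory_order_acquire))
  //     { /* ptr holds &a again */ }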

} // namespace __atomic2

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif