libstdc++
atomic_wait.h
Go to the documentation of this file.
1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2020-2025 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_wait.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{atomic}
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_WAIT_H
31 #define _GLIBCXX_ATOMIC_WAIT_H 1
32 
33 #ifdef _GLIBCXX_SYSHDR
34 #pragma GCC system_header
35 #endif
36 
37 #include <bits/version.h>
38 
39 #if __glibcxx_atomic_wait
40 #include <cstdint>
41 #include <bits/functional_hash.h>
42 #include <bits/gthr.h>
43 #include <ext/numeric_traits.h>
44 
45 #ifdef _GLIBCXX_HAVE_LINUX_FUTEX
46 # include <cerrno>
47 # include <climits>
48 # include <unistd.h>
49 # include <syscall.h>
50 # include <bits/functexcept.h>
51 #endif
52 
53 # include <bits/std_mutex.h> // std::mutex, std::__condvar
54 
55 namespace std _GLIBCXX_VISIBILITY(default)
56 {
57 _GLIBCXX_BEGIN_NAMESPACE_VERSION
  namespace __detail
  {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
// On Linux the futex syscall gives us a native wait/notify primitive.
#define _GLIBCXX_HAVE_PLATFORM_WAIT 1
    // futex operates on a naturally aligned 4-byte integer.
    using __platform_wait_t = int;
    inline constexpr size_t __platform_wait_alignment = 4;
#else
// define _GLIBCXX_HAVE_PLATFORM_WAIT and implement __platform_wait()
// and __platform_notify() if there is a more efficient primitive supported
// by the platform (e.g. __ulock_wait()/__ulock_wake()) which is better than
// a mutex/condvar based wait.
# if ATOMIC_LONG_LOCK_FREE == 2
    // Prefer the widest lock-free type for the fallback proxy wait.
    using __platform_wait_t = unsigned long;
# else
    using __platform_wait_t = unsigned int;
# endif
    inline constexpr size_t __platform_wait_alignment
      = __alignof__(__platform_wait_t);
#endif
  } // namespace __detail
78 
79  template<typename _Tp>
80  inline constexpr bool __platform_wait_uses_type
81 #ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
82  = is_scalar_v<_Tp>
83  && ((sizeof(_Tp) == sizeof(__detail::__platform_wait_t))
84  && (alignof(_Tp*) >= __detail::__platform_wait_alignment));
85 #else
86  = false;
87 #endif
88 
89  namespace __detail
90  {
91 #ifdef _GLIBCXX_HAVE_LINUX_FUTEX
    // Operation codes and flags for the futex(2) system call.
    // The numeric values are part of the Linux kernel ABI.
    enum class __futex_wait_flags : int
    {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
      // FUTEX_PRIVATE_FLAG: futex is process-private, skips shared-memory
      // bookkeeping in the kernel.
      __private_flag = 128,
#else
      __private_flag = 0,
#endif
      __wait = 0,			// FUTEX_WAIT
      __wake = 1,			// FUTEX_WAKE
      __wait_bitset = 9,		// FUTEX_WAIT_BITSET
      __wake_bitset = 10,		// FUTEX_WAKE_BITSET
      __wait_private = __wait | __private_flag,
      __wake_private = __wake | __private_flag,
      __wait_bitset_private = __wait_bitset | __private_flag,
      __wake_bitset_private = __wake_bitset | __private_flag,
      __bitset_match_any = -1		// FUTEX_BITSET_MATCH_ANY
    };
109 
    // Block in the kernel until *__addr no longer equals __val (or until
    // woken).  Returns without error on a successful wait, on EAGAIN (the
    // value already changed before the kernel could sleep) and on EINTR
    // (interrupted by a signal) -- callers loop and re-check the value.
    // Any other futex failure is reported via std::system_error.
    template<typename _Tp>
      void
      __platform_wait(const _Tp* __addr, __platform_wait_t __val) noexcept
      {
	auto __e = syscall (SYS_futex, static_cast<const void*>(__addr),
			    static_cast<int>(__futex_wait_flags::__wait_private),
			    __val, nullptr);  // nullptr timeout: wait indefinitely
	if (!__e || errno == EAGAIN)
	  return;
	if (errno != EINTR)
	  __throw_system_error(errno);
	// EINTR falls through: treated as a spurious wakeup.
      }
122 
    // Wake threads blocked in __platform_wait on __addr: all of them when
    // __all is true, otherwise at most one.  The syscall's return value
    // (number woken) is intentionally ignored.
    template<typename _Tp>
      void
      __platform_notify(const _Tp* __addr, bool __all) noexcept
      {
	syscall (SYS_futex, static_cast<const void*>(__addr),
		 static_cast<int>(__futex_wait_flags::__wake_private),
		 __all ? INT_MAX : 1);
      }
131 #endif
132 
    // Give up the rest of this thread's timeslice, if the platform lets us.
    // A no-op on single-threaded builds or without sched_yield().
    inline void
    __thread_yield() noexcept
    {
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
      __gthread_yield();
#endif
    }
140 
    // Cheap spin-loop hint: the PAUSE instruction on x86 (reduces power and
    // avoids memory-order machine clears in tight spin loops); elsewhere
    // fall back to yielding the timeslice.
    inline void
    __thread_relax() noexcept
    {
#if defined __i386__ || defined __x86_64__
      __builtin_ia32_pause();
#else
      __thread_yield();
#endif
    }
150 
    // Tuning for __atomic_spin: poll the predicate __atomic_spin_count
    // times in total, using the cheap __thread_relax() for the first
    // __atomic_spin_count_relax iterations and __thread_yield() after that.
    inline constexpr auto __atomic_spin_count_relax = 12;
    inline constexpr auto __atomic_spin_count = 16;

    // Spin policy that never extends the spin phase: once the fixed
    // iteration budget is exhausted the caller proceeds to block.
    struct __default_spin_policy
    {
      bool
      operator()() const noexcept
      { return false; }
    };
160 
161  template<typename _Pred,
162  typename _Spin = __default_spin_policy>
163  bool
164  __atomic_spin(_Pred& __pred, _Spin __spin = _Spin{ }) noexcept
165  {
166  for (auto __i = 0; __i < __atomic_spin_count; ++__i)
167  {
168  if (__pred())
169  return true;
170 
171  if (__i < __atomic_spin_count_relax)
172  __detail::__thread_relax();
173  else
174  __detail::__thread_yield();
175  }
176 
177  while (__spin())
178  {
179  if (__pred())
180  return true;
181  }
182 
183  return false;
184  }
185 
186  // return true if equal
187  template<typename _Tp>
188  bool __atomic_compare(const _Tp& __a, const _Tp& __b)
189  {
190  // TODO make this do the correct padding bit ignoring comparison
191  return __builtin_memcmp(std::addressof(__a), std::addressof(__b),
192  sizeof(_Tp)) == 0;
193  }
194 
    // Shared state for one slot of the global waiter table: a waiter count
    // (_M_wait), a proxy wait variable (_M_ver), and -- when there is no
    // native platform wait -- a mutex/condvar pair to block on.
    struct __waiter_pool_base
    {
      // Don't use std::hardware_destructive_interference_size here because we
      // don't want the layout of library types to depend on compiler options.
      static constexpr auto _S_align = 64;

      // Number of threads currently blocked (or about to block) on this slot.
      alignas(_S_align) __platform_wait_t _M_wait = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      mutex _M_mtx;
#endif

      // Proxy "version" counter waited on when the user's atomic object
      // cannot be waited on directly; bumped by _M_notify.
      alignas(_S_align) __platform_wait_t _M_ver = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      __condvar _M_cv;
#endif
      __waiter_pool_base() = default;

      // Register this thread as a waiter before checking/blocking.
      void
      _M_enter_wait() noexcept
      { __atomic_fetch_add(&_M_wait, 1, __ATOMIC_SEQ_CST); }

      // Deregister after waking.
      void
      _M_leave_wait() noexcept
      { __atomic_fetch_sub(&_M_wait, 1, __ATOMIC_RELEASE); }

      // True if any thread is registered as waiting on this slot; lets
      // notifiers skip the wake syscall when nobody could be blocked.
      bool
      _M_waiting() const noexcept
      {
	__platform_wait_t __res;
	__atomic_load(&_M_wait, &__res, __ATOMIC_SEQ_CST);
	return __res != 0;
      }

      // Wake waiters on __addr.  __bare forces the wake even when the
      // waiter count is zero (contention tracked externally by the caller).
      void
      _M_notify(__platform_wait_t* __addr, [[maybe_unused]] bool __all,
		bool __bare) noexcept
      {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
	if (__addr == &_M_ver)
	  {
	    // Proxy wait: bump the version so proxied waiters observe a
	    // change; they may be waiting for different objects, so wake all.
	    __atomic_fetch_add(__addr, 1, __ATOMIC_SEQ_CST);
	    __all = true;
	  }

	if (__bare || _M_waiting())
	  __platform_notify(__addr, __all);
#else
	{
	  // Bump under the mutex so a waiter can't miss the change between
	  // its re-check and its condvar wait.
	  lock_guard<mutex> __l(_M_mtx);
	  __atomic_fetch_add(__addr, 1, __ATOMIC_RELAXED);
	}
	if (__bare || _M_waiting())
	  _M_cv.notify_all();
#endif
      }

      // Map an arbitrary address to one of 16 statically allocated pool
      // slots (hash: drop the low 2 alignment bits, then mod table size).
      // Unrelated addresses may share a slot; that only causes spurious
      // wakeups, not missed ones.
      static __waiter_pool_base&
      _S_for(const void* __addr) noexcept
      {
	constexpr __UINTPTR_TYPE__ __ct = 16;
	static __waiter_pool_base __w[__ct];
	auto __key = ((__UINTPTR_TYPE__)__addr >> 2) % __ct;
	return __w[__key];
      }
    };
262 
    // Adds the blocking operation itself on top of __waiter_pool_base.
    struct __waiter_pool : __waiter_pool_base
    {
      // Block until *__addr is observed to differ from __old (or until a
      // spurious wakeup -- callers loop).
      void
      _M_do_wait(const __platform_wait_t* __addr, __platform_wait_t __old) noexcept
      {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
	__platform_wait(__addr, __old);
#else
	// Mutex/condvar fallback: re-check the value under the lock so a
	// notifier's bump-under-lock in _M_notify can't be missed.
	__platform_wait_t __val;
	__atomic_load(__addr, &__val, __ATOMIC_SEQ_CST);
	if (__val == __old)
	  {
	    lock_guard<mutex> __l(_M_mtx);
	    __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
	    if (__val == __old)
	      _M_cv.wait(_M_mtx);
	  }
#endif // _GLIBCXX_HAVE_PLATFORM_WAIT
      }
    };
283 
    // Common waiter plumbing: binds an address to its pool slot and decides
    // whether to wait on the user's object directly or on the slot's proxy
    // version counter.
    template<typename _Tp>
      struct __waiter_base
      {
	using __waiter_type = _Tp;

	__waiter_type& _M_w;		// pool slot for the address
	__platform_wait_t* _M_addr;	// what we actually wait on

	// Direct wait: reinterpret the user's object as __platform_wait_t.
	// Proxy wait: use __b (the slot's _M_ver) instead.
	template<typename _Up>
	  static __platform_wait_t*
	  _S_wait_addr(const _Up* __a, __platform_wait_t* __b)
	  {
	    if constexpr (__platform_wait_uses_type<_Up>)
	      return reinterpret_cast<__platform_wait_t*>(const_cast<_Up*>(__a));
	    else
	      return __b;
	  }

	// Pool slot lookup, cast to the derived waiter type.  The
	// static_assert guards the reinterpret_cast: the derived type must
	// add no data members.
	static __waiter_type&
	_S_for(const void* __addr) noexcept
	{
	  static_assert(sizeof(__waiter_type) == sizeof(__waiter_pool_base));
	  auto& res = __waiter_pool_base::_S_for(__addr);
	  return reinterpret_cast<__waiter_type&>(res);
	}

	template<typename _Up>
	  explicit __waiter_base(const _Up* __addr) noexcept
	  : _M_w(_S_for(__addr))
	  , _M_addr(_S_wait_addr(__addr, &_M_w._M_ver))
	  { }

	void
	_M_notify(bool __all, bool __bare = false) noexcept
	{ _M_w._M_notify(_M_addr, __all, __bare); }

	// Spin on "value changed from __old" and capture in __val the word
	// to hand to the blocking wait (the old value itself for direct
	// waits, a snapshot of the proxy counter otherwise).  Returns true
	// if the spin already observed a change.
	template<typename _Up, typename _ValFn,
		 typename _Spin = __default_spin_policy>
	  static bool
	  _S_do_spin_v(__platform_wait_t* __addr,
		       const _Up& __old, _ValFn __vfn,
		       __platform_wait_t& __val,
		       _Spin __spin = _Spin{ })
	  {
	    auto const __pred = [=]
	      { return !__detail::__atomic_compare(__old, __vfn()); };

	    if constexpr (__platform_wait_uses_type<_Up>)
	      {
		// Direct wait: block until the object's bits differ from __old.
		__builtin_memcpy(&__val, &__old, sizeof(__val));
	      }
	    else
	      {
		// Proxy wait: snapshot the version counter BEFORE spinning,
		// so a notify during the spin is not missed by the wait.
		__atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
	      }
	    return __atomic_spin(__pred, __spin);
	  }

	template<typename _Up, typename _ValFn,
		 typename _Spin = __default_spin_policy>
	  bool
	  _M_do_spin_v(const _Up& __old, _ValFn __vfn,
		       __platform_wait_t& __val,
		       _Spin __spin = _Spin{ })
	  { return _S_do_spin_v(_M_addr, __old, __vfn, __val, __spin); }

	// Predicate flavour of the above: snapshot the wait word, then spin
	// on an arbitrary caller-supplied condition.
	template<typename _Pred,
		 typename _Spin = __default_spin_policy>
	  static bool
	  _S_do_spin(const __platform_wait_t* __addr,
		     _Pred __pred,
		     __platform_wait_t& __val,
		     _Spin __spin = _Spin{ })
	  {
	    __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
	    return __atomic_spin(__pred, __spin);
	  }

	template<typename _Pred,
		 typename _Spin = __default_spin_policy>
	  bool
	  _M_do_spin(_Pred __pred, __platform_wait_t& __val,
		     _Spin __spin = _Spin{ })
	  { return _S_do_spin(_M_addr, __pred, __val, __spin); }
      };
369 
    // Concrete waiter.  _EntersWait selects whether construction/destruction
    // maintains the pool's waiter count (normal waits) or not (bare waits,
    // where the atomic type tracks contention itself).
    template<typename _EntersWait>
      struct __waiter : __waiter_base<__waiter_pool>
      {
	using __base_type = __waiter_base<__waiter_pool>;

	template<typename _Tp>
	  explicit __waiter(const _Tp* __addr) noexcept
	  : __base_type(__addr)
	  {
	    if constexpr (_EntersWait::value)
	      _M_w._M_enter_wait();
	  }

	~__waiter()
	{
	  if constexpr (_EntersWait::value)
	    _M_w._M_leave_wait();
	}

	// Wait until the value produced by __vfn() differs from __old
	// (bitwise).  Spin first; block only if the spin fails; loop on
	// spurious wakeups.
	template<typename _Tp, typename _ValFn>
	  void
	  _M_do_wait_v(_Tp __old, _ValFn __vfn)
	  {
	    do
	      {
		__platform_wait_t __val;
		if (__base_type::_M_do_spin_v(__old, __vfn, __val))
		  return;
		__base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
	      }
	    while (__detail::__atomic_compare(__old, __vfn()));
	  }

	// Wait until __pred() becomes true, with the same spin-then-block
	// structure as above.
	template<typename _Pred>
	  void
	  _M_do_wait(_Pred __pred) noexcept
	  {
	    do
	      {
		__platform_wait_t __val;
		if (__base_type::_M_do_spin(__pred, __val))
		  return;
		__base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
	      }
	    while (!__pred());
	  }
      };
417 
    // Waiter that registers itself in the pool's waiter count.
    using __enters_wait = __waiter<std::true_type>;
    // Waiter for atomics that track contention externally ("bare" waits).
    using __bare_wait = __waiter<std::false_type>;
420  } // namespace __detail
421 
  // Block until the value loaded by __vfn() differs (bitwise) from __old.
  // Used to implement std::atomic<T>::wait.
  template<typename _Tp, typename _ValFn>
    void
    __atomic_wait_address_v(const _Tp* __addr, _Tp __old,
			    _ValFn __vfn) noexcept
    {
      __detail::__enters_wait __w(__addr);
      __w._M_do_wait_v(__old, __vfn);
    }
430 
  // Block until __pred() becomes true, waking whenever *__addr is notified.
  template<typename _Tp, typename _Pred>
    void
    __atomic_wait_address(const _Tp* __addr, _Pred __pred) noexcept
    {
      __detail::__enters_wait __w(__addr);
      __w._M_do_wait(__pred);
    }
438 
  // This call is to be used by atomic types which track contention externally
  // (e.g. semaphores): it does not touch the pool's waiter count, so the
  // caller must ensure notifiers know a waiter exists.
  template<typename _Pred>
    void
    __atomic_wait_address_bare(const __detail::__platform_wait_t* __addr,
			       _Pred __pred) noexcept
    {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
      // Native primitive available: wait on __addr directly, bypassing the
      // waiter-pool object entirely.
      do
	{
	  __detail::__platform_wait_t __val;
	  if (__detail::__bare_wait::_S_do_spin(__addr, __pred, __val))
	    return;
	  __detail::__platform_wait(__addr, __val);
	}
      while (!__pred());
#else // !_GLIBCXX_HAVE_PLATFORM_WAIT
      __detail::__bare_wait __w(__addr);
      __w._M_do_wait(__pred);
#endif
    }
459 
  // Wake threads blocked in __atomic_wait_address(_v) on __addr.
  template<typename _Tp>
    void
    __atomic_notify_address(const _Tp* __addr, bool __all) noexcept
    {
      __detail::__bare_wait __w(__addr);
      __w._M_notify(__all);
    }
467 
  // This call is to be used by atomic types which track contention externally
  // -- pairs with __atomic_wait_address_bare; always issues the wake
  // (__bare = true) since the pool's waiter count is not maintained.
  inline void
  __atomic_notify_address_bare(const __detail::__platform_wait_t* __addr,
			       bool __all) noexcept
  {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
    __detail::__platform_notify(__addr, __all);
#else
    __detail::__bare_wait __w(__addr);
    __w._M_notify(__all, true);
#endif
  }
480 _GLIBCXX_END_NAMESPACE_VERSION
481 } // namespace std
482 #endif // __glibcxx_atomic_wait
483 #endif // _GLIBCXX_ATOMIC_WAIT_H
constexpr _Tp* addressof(_Tp& __r) noexcept
Returns the actual address of the object or function referenced by r, even in the presence of an overloaded operator&.
Definition: move.h:176
The top-level namespace for all ISO C++ entities is std.