Linux e000727e84fa 5.4.0-208-generic #228-Ubuntu SMP Fri Feb 7 19:41:33 UTC 2025 x86_64
Apache/2.4.62 (Debian)
Server IP : 192.168.16.2 | Client IP : 192.168.16.1
Can't read [ /etc/named.conf ]
PHP : 8.2.27
User : root
Current directory : /usr/include/c++/12/bits/
Name | Size | Permission
algorithmfwd.h | 23.97 KB | -rw-r--r--
align.h | 3.62 KB | -rw-r--r--
alloc_traits.h | 27.03 KB | -rw-r--r--
allocated_ptr.h | 3.25 KB | -rw-r--r--
allocator.h | 9.55 KB | -rw-r--r--
atomic_base.h | 56.73 KB | -rw-r--r--
atomic_futex.h | 12.08 KB | -rw-r--r--
atomic_lockfree_defines.h | 2.3 KB | -rw-r--r--
atomic_timed_wait.h | 13.18 KB | -rw-r--r--
atomic_wait.h | 12.2 KB | -rw-r--r--
basic_ios.h | 15.7 KB | -rw-r--r--
basic_ios.tcc | 5.96 KB | -rw-r--r--
basic_string.h | 149.19 KB | -rw-r--r--
basic_string.tcc | 33.76 KB | -rw-r--r--
boost_concept_check.h | 29.13 KB | -rw-r--r--
c++0x_warning.h | 1.44 KB | -rw-r--r--
char_traits.h | 28.54 KB | -rw-r--r--
charconv.h | 3.35 KB | -rw-r--r--
chrono.h | 41.53 KB | -rw-r--r--
codecvt.h | 24.9 KB | -rw-r--r--
concept_check.h | 3.34 KB | -rw-r--r--
cow_string.h | 129.11 KB | -rw-r--r--
cpp_type_traits.h | 13.72 KB | -rw-r--r--
cxxabi_forced.h | 1.77 KB | -rw-r--r--
cxxabi_init_exception.h | 2.17 KB | -rw-r--r--
deque.tcc | 41.38 KB | -rw-r--r--
enable_special_members.h | 12.16 KB | -rw-r--r--
erase_if.h | 2.09 KB | -rw-r--r--
exception.h | 2.45 KB | -rw-r--r--
exception_defines.h | 1.61 KB | -rw-r--r--
exception_ptr.h | 7.73 KB | -rw-r--r--
forward_list.h | 49.87 KB | -rw-r--r--
forward_list.tcc | 13.71 KB | -rw-r--r--
fs_dir.h | 16.23 KB | -rw-r--r--
fs_fwd.h | 10.66 KB | -rw-r--r--
fs_ops.h | 10.49 KB | -rw-r--r--
fs_path.h | 40.57 KB | -rw-r--r--
fstream.tcc | 32.88 KB | -rw-r--r--
functexcept.h | 3.43 KB | -rw-r--r--
functional_hash.h | 8.51 KB | -rw-r--r--
gslice.h | 5.39 KB | -rw-r--r--
gslice_array.h | 7.67 KB | -rw-r--r--
hash_bytes.h | 2.1 KB | -rw-r--r--
hashtable.h | 87.83 KB | -rw-r--r--
hashtable_policy.h | 63.21 KB | -rw-r--r--
indirect_array.h | 7.68 KB | -rw-r--r--
invoke.h | 5.92 KB | -rw-r--r--
ios_base.h | 31.43 KB | -rw-r--r--
istream.tcc | 31.38 KB | -rw-r--r--
iterator_concepts.h | 32.61 KB | -rw-r--r--
list.tcc | 17.62 KB | -rw-r--r--
locale_classes.h | 24.46 KB | -rw-r--r--
locale_classes.tcc | 8.18 KB | -rw-r--r--
locale_conv.h | 18.74 KB | -rw-r--r--
locale_facets.h | 91.72 KB | -rw-r--r--
locale_facets.tcc | 39.51 KB | -rw-r--r--
locale_facets_nonio.h | 69.1 KB | -rw-r--r--
locale_facets_nonio.tcc | 53.76 KB | -rw-r--r--
localefwd.h | 5.8 KB | -rw-r--r--
mask_array.h | 7.5 KB | -rw-r--r--
max_size_type.h | 21.96 KB | -rw-r--r--
memoryfwd.h | 2.45 KB | -rw-r--r--
mofunc_impl.h | 7.52 KB | -rw-r--r--
move.h | 6.63 KB | -rw-r--r--
move_only_function.h | 5.73 KB | -rw-r--r--
nested_exception.h | 4.77 KB | -rw-r--r--
new_allocator.h | 6.47 KB | -rw-r--r--
node_handle.h | 10.67 KB | -rw-r--r--
ostream.tcc | 11.69 KB | -rw-r--r--
ostream_insert.h | 3.94 KB | -rw-r--r--
parse_numbers.h | 7.8 KB | -rw-r--r--
postypes.h | 7.33 KB | -rw-r--r--
predefined_ops.h | 9.94 KB | -rw-r--r--
ptr_traits.h | 8.88 KB | -rw-r--r--
quoted_string.h | 4.93 KB | -rw-r--r--
random.h | 174.24 KB | -rw-r--r--
random.tcc | 103.38 KB | -rw-r--r--
range_access.h | 10.97 KB | -rw-r--r--
ranges_algo.h | 114.1 KB | -rw-r--r--
ranges_algobase.h | 18.4 KB | -rw-r--r--
ranges_base.h | 25.61 KB | -rw-r--r--
ranges_cmp.h | 5.83 KB | -rw-r--r--
ranges_uninitialized.h | 17.71 KB | -rw-r--r--
ranges_util.h | 21.3 KB | -rw-r--r--
refwrap.h | 13.21 KB | -rw-r--r--
regex.h | 101.77 KB | -rw-r--r--
regex.tcc | 16.13 KB | -rw-r--r--
regex_automaton.h | 10.54 KB | -rw-r--r--
regex_automaton.tcc | 7.58 KB | -rw-r--r--
regex_compiler.h | 15.92 KB | -rw-r--r--
regex_compiler.tcc | 18.38 KB | -rw-r--r--
regex_constants.h | 14.51 KB | -rw-r--r--
regex_error.h | 5.03 KB | -rw-r--r--
regex_executor.h | 8.8 KB | -rw-r--r--
regex_executor.tcc | 18.41 KB | -rw-r--r--
regex_scanner.h | 6.88 KB | -rw-r--r--
regex_scanner.tcc | 14.54 KB | -rw-r--r--
semaphore_base.h | 7.7 KB | -rw-r--r--
shared_ptr.h | 38.14 KB | -rw-r--r--
shared_ptr_atomic.h | 20.7 KB | -rw-r--r--
shared_ptr_base.h | 65.23 KB | -rw-r--r--
slice_array.h | 9.35 KB | -rw-r--r--
specfun.h | 46.13 KB | -rw-r--r--
sstream.tcc | 9.91 KB | -rw-r--r--
std_abs.h | 3.37 KB | -rw-r--r--
std_function.h | 22.34 KB | -rw-r--r--
std_mutex.h | 6.27 KB | -rw-r--r--
std_thread.h | 8.24 KB | -rw-r--r--
stl_algo.h | 210.67 KB | -rw-r--r--
stl_algobase.h | 75.01 KB | -rw-r--r--
stl_bvector.h | 40.27 KB | -rw-r--r--
stl_construct.h | 8.46 KB | -rw-r--r--
stl_deque.h | 76.34 KB | -rw-r--r--
stl_function.h | 43.97 KB | -rw-r--r--
stl_heap.h | 20.31 KB | -rw-r--r--
stl_iterator.h | 79.65 KB | -rw-r--r--
stl_iterator_base_funcs.h | 8.57 KB | -rw-r--r--
stl_iterator_base_types.h | 9.46 KB | -rw-r--r--
stl_list.h | 70.68 KB | -rw-r--r--
stl_map.h | 54.8 KB | -rw-r--r--
stl_multimap.h | 42.67 KB | -rw-r--r--
stl_multiset.h | 36.84 KB | -rw-r--r--
stl_numeric.h | 14.26 KB | -rw-r--r--
stl_pair.h | 29.41 KB | -rw-r--r--
stl_queue.h | 28.06 KB | -rw-r--r--
stl_raw_storage_iter.h | 3.91 KB | -rw-r--r--
stl_relops.h | 4.47 KB | -rw-r--r--
stl_set.h | 37.09 KB | -rw-r--r--
stl_stack.h | 13.71 KB | -rw-r--r--
stl_tempbuf.h | 8.48 KB | -rw-r--r--
stl_tree.h | 72.04 KB | -rw-r--r--
stl_uninitialized.h | 35.32 KB | -rw-r--r--
stl_vector.h | 68.73 KB | -rw-r--r--
stream_iterator.h | 8.09 KB | -rw-r--r--
streambuf.tcc | 4.58 KB | -rw-r--r--
streambuf_iterator.h | 15.53 KB | -rw-r--r--
string_view.tcc | 7 KB | -rw-r--r--
stringfwd.h | 2.56 KB | -rw-r--r--
this_thread_sleep.h | 3.21 KB | -rw-r--r--
uniform_int_dist.h | 12.77 KB | -rw-r--r--
unique_lock.h | 6.01 KB | -rw-r--r--
unique_ptr.h | 35.54 KB | -rw-r--r--
unordered_map.h | 74.87 KB | -rw-r--r--
unordered_set.h | 61.46 KB | -rw-r--r--
uses_allocator.h | 6.74 KB | -rw-r--r--
uses_allocator_args.h | 6.96 KB | -rw-r--r--
utility.h | 8.12 KB | -rw-r--r--
valarray_after.h | 22.74 KB | -rw-r--r--
valarray_array.h | 20.8 KB | -rw-r--r--
valarray_array.tcc | 7.08 KB | -rw-r--r--
valarray_before.h | 18.69 KB | -rw-r--r--
vector.tcc | 30.82 KB | -rw-r--r--
Code Editor : shared_ptr_atomic.h
// shared_ptr atomic access -*- C++ -*-

// Copyright (C) 2014-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/shared_ptr_atomic.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{memory}
 */

#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/atomic_base.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup pointer_abstractions
   * @{
   */
  /// @relates shared_ptr @{

  /// @cond undocumented

  struct _Sp_locker
  {
    _Sp_locker(const _Sp_locker&) = delete;
    _Sp_locker& operator=(const _Sp_locker&) = delete;

#ifdef __GTHREADS
    explicit
    _Sp_locker(const void*) noexcept;
    _Sp_locker(const void*, const void*) noexcept;
    ~_Sp_locker();

  private:
    unsigned char _M_key1;
    unsigned char _M_key2;
#else
    explicit _Sp_locker(const void*, const void* = nullptr) { }
#endif
  };

  /// @endcond

  /**
   *  @brief  Report whether shared_ptr atomic operations are lock-free.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return True if atomic access to @c *__p is lock-free, false otherwise.
   *  @{
  */
  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_is_lock_free(const __shared_ptr<_Tp, _Lp>* __p)
    {
#ifdef __GTHREADS
      return __gthread_active_p() == 0;
#else
      return true;
#endif
    }

  template<typename _Tp>
    inline bool
    atomic_is_lock_free(const shared_ptr<_Tp>* __p)
    { return std::atomic_is_lock_free<_Tp, __default_lock_policy>(__p); }

  /// @}

  /**
   *  @brief  Atomic load for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return @c *__p
   *
   *  The memory order shall not be @c memory_order_release or
   *  @c memory_order_acq_rel.
   *  @{
  */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_load(const shared_ptr<_Tp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load_explicit(const __shared_ptr<_Tp, _Lp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_load(const __shared_ptr<_Tp, _Lp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }
  /// @}

  /**
   *  @brief  Atomic store for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r The value to store.
   *
   *  The memory order shall not be @c memory_order_acquire or
   *  @c memory_order_acq_rel.
   *  @{
  */
  template<typename _Tp>
    inline void
    atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp>
    inline void
    atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store_explicit(__shared_ptr<_Tp, _Lp>* __p,
                          __shared_ptr<_Tp, _Lp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline void
    atomic_store(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }
  /// @}

  /**
   *  @brief  Atomic exchange for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r New value to store in @c *__p.
   *  @return The original value of @c *__p
   *  @{
  */
  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp>
    inline shared_ptr<_Tp>
    atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange_explicit(__shared_ptr<_Tp, _Lp>* __p,
                             __shared_ptr<_Tp, _Lp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }
  /// @}

  /**
   *  @brief  Atomic compare-and-swap for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __v A non-null pointer to a shared_ptr object.
   *  @param  __w A non-null pointer to a shared_ptr object.
   *  @return True if @c *__p was equivalent to @c *__v, false otherwise.
   *
   *  The memory order for failure shall not be @c memory_order_release or
   *  @c memory_order_acq_rel, or stronger than the memory order for success.
   *  @{
  */
  template<typename _Tp>
    bool
    atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p,
                                            shared_ptr<_Tp>* __v,
                                            shared_ptr<_Tp> __w,
                                            memory_order,
                                            memory_order)
    {
      shared_ptr<_Tp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<shared_ptr<_Tp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                   shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p,
                                          shared_ptr<_Tp>* __v,
                                          shared_ptr<_Tp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp>
    inline bool
    atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                 shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    bool
    atomic_compare_exchange_strong_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                            __shared_ptr<_Tp, _Lp>* __v,
                                            __shared_ptr<_Tp, _Lp> __w,
                                            memory_order,
                                            memory_order)
    {
      __shared_ptr<_Tp, _Lp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<__shared_ptr<_Tp, _Lp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_strong(__shared_ptr<_Tp, _Lp>* __p,
                                   __shared_ptr<_Tp, _Lp>* __v,
                                   __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                          __shared_ptr<_Tp, _Lp>* __v,
                                          __shared_ptr<_Tp, _Lp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp, _Lock_policy _Lp>
    inline bool
    atomic_compare_exchange_weak(__shared_ptr<_Tp, _Lp>* __p,
                                 __shared_ptr<_Tp, _Lp>* __v,
                                 __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }
  /// @}

#if __cplusplus >= 202002L
# define __cpp_lib_atomic_shared_ptr 201711L
  template<typename _Tp>
    class atomic;

  template<typename _Up>
    static constexpr bool __is_shared_ptr = false;
  template<typename _Up>
    static constexpr bool __is_shared_ptr<shared_ptr<_Up>> = true;

  template<typename _Tp>
    class _Sp_atomic
    {
      using value_type = _Tp;

      friend class atomic<_Tp>;

      // An atomic version of __shared_count<> and __weak_count<>.
      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
      struct _Atomic_count
      {
        // Either __shared_count<> or __weak_count<>
        using __count_type = decltype(_Tp::_M_refcount);

        // _Sp_counted_base<>*
        using pointer = decltype(__count_type::_M_pi);

        // Ensure we can use the LSB as the lock bit.
        static_assert(alignof(remove_pointer_t<pointer>) > 1);

        constexpr _Atomic_count() noexcept = default;

        explicit
        _Atomic_count(__count_type&& __c) noexcept
        : _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
        {
          __c._M_pi = nullptr;
        }

        ~_Atomic_count()
        {
          auto __val = _M_val.load(memory_order_relaxed);
          __glibcxx_assert(!(__val & _S_lock_bit));
          if (auto __pi = reinterpret_cast<pointer>(__val))
            {
              if constexpr (__is_shared_ptr<_Tp>)
                __pi->_M_release();
              else
                __pi->_M_weak_release();
            }
        }

        _Atomic_count(const _Atomic_count&) = delete;
        _Atomic_count& operator=(const _Atomic_count&) = delete;

        // Precondition: Caller does not hold lock!
        // Returns the raw pointer value without the lock bit set.
        pointer
        lock(memory_order __o) const noexcept
        {
          // To acquire the lock we flip the LSB from 0 to 1.

          auto __current = _M_val.load(memory_order_relaxed);
          while (__current & _S_lock_bit)
            {
#if __cpp_lib_atomic_wait
              __detail::__thread_relax();
#endif
              __current = _M_val.load(memory_order_relaxed);
            }

          while (!_M_val.compare_exchange_strong(__current,
                                                 __current | _S_lock_bit,
                                                 __o,
                                                 memory_order_relaxed))
            {
#if __cpp_lib_atomic_wait
              __detail::__thread_relax();
#endif
              __current = __current & ~_S_lock_bit;
            }
          return reinterpret_cast<pointer>(__current);
        }

        // Precondition: caller holds lock!
        void
        unlock(memory_order __o) const noexcept
        {
          _M_val.fetch_sub(1, __o);
        }

        // Swaps the values of *this and __c, and unlocks *this.
        // Precondition: caller holds lock!
        void
        _M_swap_unlock(__count_type& __c, memory_order __o) noexcept
        {
          if (__o != memory_order_seq_cst)
            __o = memory_order_release;
          auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
          __x = _M_val.exchange(__x, __o);
          __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
        }

#if __cpp_lib_atomic_wait
        // Precondition: caller holds lock!
        void
        _M_wait_unlock(memory_order __o) const noexcept
        {
          auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
          _M_val.wait(__v & ~_S_lock_bit, __o);
        }

        void
        notify_one() noexcept
        { _M_val.notify_one(); }

        void
        notify_all() noexcept
        { _M_val.notify_all(); }
#endif

      private:
        mutable __atomic_base<uintptr_t> _M_val{0};
        static constexpr uintptr_t _S_lock_bit{1};
      };

      typename _Tp::element_type* _M_ptr = nullptr;
      _Atomic_count _M_refcount;

      static typename _Atomic_count::pointer
      _S_add_ref(typename _Atomic_count::pointer __p)
      {
        if (__p)
          {
            if constexpr (__is_shared_ptr<_Tp>)
              __p->_M_add_ref_copy();
            else
              __p->_M_weak_add_ref();
          }
        return __p;
      }

      constexpr _Sp_atomic() noexcept = default;

      explicit
      _Sp_atomic(value_type __r) noexcept
      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
      { }

      ~_Sp_atomic() = default;

      _Sp_atomic(const _Sp_atomic&) = delete;
      void operator=(const _Sp_atomic&) = delete;

      value_type
      load(memory_order __o) const noexcept
      {
        __glibcxx_assert(__o != memory_order_release
                           && __o != memory_order_acq_rel);
        // Ensure that the correct value of _M_ptr is visible after locking,
        // by upgrading relaxed or consume to acquire.
        if (__o != memory_order_seq_cst)
          __o = memory_order_acquire;

        value_type __ret;
        auto __pi = _M_refcount.lock(__o);
        __ret._M_ptr = _M_ptr;
        __ret._M_refcount._M_pi = _S_add_ref(__pi);
        _M_refcount.unlock(memory_order_relaxed);
        return __ret;
      }

      void
      swap(value_type& __r, memory_order __o) noexcept
      {
        _M_refcount.lock(memory_order_acquire);
        std::swap(_M_ptr, __r._M_ptr);
        _M_refcount._M_swap_unlock(__r._M_refcount, __o);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        bool __result = true;
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __expected._M_ptr
              && __pi == __expected._M_refcount._M_pi)
          {
            _M_ptr = __desired._M_ptr;
            _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
          }
        else
          {
            _Tp __sink = std::move(__expected);
            __expected._M_ptr = _M_ptr;
            __expected._M_refcount._M_pi = _S_add_ref(__pi);
            _M_refcount.unlock(__o2);
            __result = false;
          }
        return __result;
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old, memory_order __o) const noexcept
      {
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
          _M_refcount._M_wait_unlock(__o);
        else
          _M_refcount.unlock(memory_order_relaxed);
      }

      void
      notify_one() noexcept
      { _M_refcount.notify_one(); }

      void
      notify_all() noexcept
      { _M_refcount.notify_all(); }
#endif
    };

  template<typename _Tp>
    class atomic<shared_ptr<_Tp>>
    {
    public:
      using value_type = shared_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3661. constinit atomic<shared_ptr<T>> a(nullptr); should work
      constexpr atomic(nullptr_t) noexcept : atomic() { }

      atomic(shared_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      shared_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator shared_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(shared_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(shared_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      shared_ptr<_Tp>
      exchange(shared_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(shared_ptr<_Tp>& __expected,
                              shared_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
        {
        case memory_order_acq_rel:
          __o2 = memory_order_acquire;
          break;
        case memory_order_release:
          __o2 = memory_order_relaxed;
          break;
        default:
          __o2 = __o;
        }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      { _M_impl.wait(std::move(__old), __o); }

      void
      notify_one() noexcept
      { _M_impl.notify_one(); }

      void
      notify_all() noexcept
      { _M_impl.notify_all(); }
#endif

    private:
      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
    };

  template<typename _Tp>
    class atomic<weak_ptr<_Tp>>
    {
    public:
      using value_type = weak_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(weak_ptr<_Tp> __r) noexcept
      : _M_impl(move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      weak_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator weak_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(weak_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(weak_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      weak_ptr<_Tp>
      exchange(weak_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(weak_ptr<_Tp>& __expected,
                              weak_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired,
                                               __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
        {
        case memory_order_acq_rel:
          __o2 = memory_order_acquire;
          break;
        case memory_order_release:
          __o2 = memory_order_relaxed;
          break;
        default:
          __o2 = __o;
        }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __cpp_lib_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      { _M_impl.wait(std::move(__old), __o); }

      void
      notify_one() noexcept
      { _M_impl.notify_one(); }

      void
      notify_all() noexcept
      { _M_impl.notify_all(); }
#endif

    private:
      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
    };
#endif // C++20

  /// @} relates shared_ptr
  /// @} group pointer_abstractions

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _SHARED_PTR_ATOMIC_H
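
Usage note: the header above exposes two interfaces for thread-safe access to a shared_ptr: the pre-C++20 free functions (std::atomic_load, std::atomic_store, std::atomic_exchange, std::atomic_compare_exchange_*), which serialize through the internal _Sp_locker mutex pool, and, from C++20 on, the std::atomic<std::shared_ptr<T>> and std::atomic<std::weak_ptr<T>> specializations built on _Sp_atomic. The following is a minimal sketch, not part of the header; the variable names and the int payload are illustrative, and the free functions are deprecated in C++20 in favour of the atomic specialization.

// Illustrative sketch only: exercises both interfaces declared above.
// Build with: g++ -std=c++20 -pthread example.cc
#include <atomic>
#include <memory>
#include <thread>

int main()
{
  // Pre-C++20 free functions (deprecated since C++20): each call locks an
  // internal mutex keyed on the shared_ptr's address (_Sp_locker above).
  std::shared_ptr<int> sp = std::make_shared<int>(1);
  std::shared_ptr<int> snapshot = std::atomic_load(&sp);    // atomic read
  std::atomic_store(&sp, std::make_shared<int>(2));         // atomic write

  // C++20 specialization, implemented by _Sp_atomic above: the low bit of the
  // control-block pointer is used as a spin lock, so no separate mutex is needed.
  std::atomic<std::shared_ptr<int>> asp{std::make_shared<int>(3)};
  std::thread writer([&asp] { asp.store(std::make_shared<int>(4)); });
  std::shared_ptr<int> seen = asp.load();   // safe concurrent read (3 or 4)
  writer.join();

  // Strong CAS: succeeds only if asp still holds the same object as 'seen';
  // on failure 'seen' is updated to the value currently held by asp.
  asp.compare_exchange_strong(seen, std::make_shared<int>(5));
  return 0;
}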