整理
This commit is contained in:
65
include/boost/atomic/detail/addressof.hpp
Normal file
65
include/boost/atomic/detail/addressof.hpp
Normal file
@@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018, 2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/addressof.hpp
|
||||
*
|
||||
* This header defines \c addressof helper function. It is similar to \c boost::addressof but it is more
|
||||
* lightweight and also contains a workaround for some compiler warnings.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// Detection logic is based on boost/core/addressof.hpp
|
||||
#if defined(BOOST_MSVC_FULL_VER) && BOOST_MSVC_FULL_VER >= 190024215
|
||||
#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
|
||||
#elif defined(BOOST_GCC) && BOOST_GCC >= 70000
|
||||
#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
|
||||
#elif defined(__has_builtin)
|
||||
#if __has_builtin(__builtin_addressof)
|
||||
#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
|
||||
#endif
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
template< typename T >
|
||||
BOOST_FORCEINLINE
|
||||
#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
|
||||
constexpr
|
||||
#endif
|
||||
T* addressof(T& value) noexcept
|
||||
{
|
||||
#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
|
||||
return __builtin_addressof(value);
|
||||
#else
|
||||
// Note: The point of using a local struct as the intermediate type instead of char is to avoid gcc warnings
|
||||
// if T is a const volatile char*:
|
||||
// warning: casting 'const volatile char* const' to 'const volatile char&' does not dereference pointer
|
||||
// The local struct makes sure T is not related to the cast target type.
|
||||
struct opaque_type;
|
||||
return reinterpret_cast< T* >(&const_cast< opaque_type& >(reinterpret_cast< const volatile opaque_type& >(value)));
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
|
||||
57
include/boost/atomic/detail/aligned_variable.hpp
Normal file
57
include/boost/atomic/detail/aligned_variable.hpp
Normal file
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/aligned_variable.hpp
|
||||
*
|
||||
* This header defines a convenience macro for declaring aligned variables
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#if defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
|
||||
#include <boost/config/helper_macros.hpp>
|
||||
#include <boost/type_traits/type_with_alignment.hpp>
|
||||
#endif
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)

//! Declares a variable \c var_name of type \c var_type aligned to \c var_alignment bytes
#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR(var_alignment, var_type, var_name) \
    alignas(var_alignment) var_type var_name

//! Same as BOOST_ATOMIC_DETAIL_ALIGNED_VAR, for use in template contexts
#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(var_alignment, var_type, var_name) \
    alignas(var_alignment) var_type var_name

#else // !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)

// Note: Some compilers cannot use constant expressions in alignment attributes or alignas, so we have to use the union trick
// (the anonymous union member with the required alignment forces alignment of var_name).
#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR(var_alignment, var_type, var_name) \
    union \
    { \
        var_type var_name; \
        boost::type_with_alignment< var_alignment >::type BOOST_JOIN(var_name, _aligner); \
    }

// Template-context variant: the dependent type_with_alignment result needs a 'typename' qualifier here
#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(var_alignment, var_type, var_name) \
    union \
    { \
        var_type var_name; \
        typename boost::type_with_alignment< var_alignment >::type BOOST_JOIN(var_name, _aligner); \
    }

#endif // !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
|
||||
157
include/boost/atomic/detail/atomic_flag_impl.hpp
Normal file
157
include/boost/atomic/detail/atomic_flag_impl.hpp
Normal file
@@ -0,0 +1,157 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2011 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014, 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/atomic_flag_impl.hpp
|
||||
*
|
||||
* This header contains implementation of \c atomic_flag.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
|
||||
|
||||
#include <chrono>
|
||||
#include <utility>
|
||||
#include <type_traits>
|
||||
#include <boost/assert.hpp>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/wait_result.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/core_operations.hpp>
|
||||
#include <boost/atomic/detail/wait_operations.hpp>
|
||||
#include <boost/atomic/detail/aligned_variable.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
/*
|
||||
* IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
|
||||
* see comment for convert_memory_order_to_gcc in gcc_atomic_memory_order_utils.hpp.
|
||||
*/
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
#define BOOST_ATOMIC_FLAG_INIT {}
|
||||
|
||||
//! Atomic flag implementation
template< bool IsInterprocess >
struct atomic_flag_impl
{
    // Prefer 4-byte storage as most platforms support waiting/notifying operations without a lock pool for 32-bit integers
    using core_operations = atomics::detail::core_operations< 4u, false, IsInterprocess >;
    using wait_operations = atomics::detail::wait_operations< core_operations >;
    using storage_type = typename core_operations::storage_type;

    static constexpr bool is_always_lock_free = core_operations::is_always_lock_free;
    static constexpr bool always_has_native_wait_notify = wait_operations::always_has_native_wait_notify;

    // Flag storage, aligned as required by the core operations backend
    BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(core_operations::storage_alignment, storage_type, m_storage);

    //! Initializes the flag to the clear (false) state
    BOOST_FORCEINLINE constexpr atomic_flag_impl() noexcept : m_storage(0u)
    {
    }

    atomic_flag_impl(atomic_flag_impl const&) = delete;
    atomic_flag_impl& operator= (atomic_flag_impl const&) = delete;

    //! Returns \c true if operations on the flag are lock-free
    BOOST_FORCEINLINE bool is_lock_free() const volatile noexcept
    {
        return is_always_lock_free;
    }

    //! Returns \c true if the backend supports native wait/notify for this storage
    BOOST_FORCEINLINE bool has_native_wait_notify() const volatile noexcept
    {
        return wait_operations::has_native_wait_notify(m_storage);
    }

    //! Returns the current state of the flag; \a order must not be a release ordering
    BOOST_FORCEINLINE bool test(memory_order order = memory_order_seq_cst) const volatile noexcept
    {
        BOOST_ASSERT(order != memory_order_release);
        BOOST_ASSERT(order != memory_order_acq_rel);
        return !!core_operations::load(m_storage, order);
    }

    //! Sets the flag and returns its previous state
    BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile noexcept
    {
        return core_operations::test_and_set(m_storage, order);
    }

    //! Clears the flag; \a order must not be an acquire ordering
    BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile noexcept
    {
        BOOST_ASSERT(order != memory_order_consume);
        BOOST_ASSERT(order != memory_order_acquire);
        BOOST_ASSERT(order != memory_order_acq_rel);
        core_operations::clear(m_storage, order);
    }

    //! Blocks until the flag value differs from \a old_val; returns the new value
    BOOST_FORCEINLINE bool wait(bool old_val, memory_order order = memory_order_seq_cst) const volatile noexcept
    {
        BOOST_ASSERT(order != memory_order_release);
        BOOST_ASSERT(order != memory_order_acq_rel);

        return !!wait_operations::wait(m_storage, static_cast< storage_type >(old_val), order);
    }

    //! Blocks until the flag value differs from \a old_val or the absolute \a timeout is reached
    template< typename Clock, typename Duration >
    BOOST_FORCEINLINE wait_result< bool >
    wait_until(bool old_val, std::chrono::time_point< Clock, Duration > timeout, memory_order order = memory_order_seq_cst) const volatile
        noexcept(noexcept(wait_operations::wait_until(
            std::declval< storage_type const volatile& >(), std::declval< storage_type >(), timeout, order, std::declval< bool& >())))
    {
        BOOST_ASSERT(order != memory_order_release);
        BOOST_ASSERT(order != memory_order_acq_rel);

        bool timed_out = false;
        storage_type new_value = wait_operations::wait_until(m_storage, static_cast< storage_type >(old_val), timeout, order, timed_out);
        return wait_result< bool >(!!new_value, timed_out);
    }

    //! Blocks until the flag value differs from \a old_val or the relative \a timeout expires
    template< typename Rep, typename Period >
    BOOST_FORCEINLINE wait_result< bool >
    wait_for(bool old_val, std::chrono::duration< Rep, Period > timeout, memory_order order = memory_order_seq_cst) const volatile
        noexcept(noexcept(wait_operations::wait_for(
            std::declval< storage_type const volatile& >(), std::declval< storage_type >(), timeout, order, std::declval< bool& >())))
    {
        BOOST_ASSERT(order != memory_order_release);
        BOOST_ASSERT(order != memory_order_acq_rel);

        bool timed_out = false;
        storage_type new_value = wait_operations::wait_for(m_storage, static_cast< storage_type >(old_val), timeout, order, timed_out);
        return wait_result< bool >(!!new_value, timed_out);
    }

    //! Wakes up one thread blocked in a wait operation on this flag
    BOOST_FORCEINLINE void notify_one() volatile noexcept
    {
        wait_operations::notify_one(m_storage);
    }

    //! Wakes up all threads blocked in wait operations on this flag
    BOOST_FORCEINLINE void notify_all() volatile noexcept
    {
        wait_operations::notify_all(m_storage);
    }
};

// Pre-C++17: out-of-class definitions for the static constexpr members (odr-use requires them)
#if defined(BOOST_NO_CXX17_INLINE_VARIABLES)
template< bool IsInterprocess >
constexpr bool atomic_flag_impl< IsInterprocess >::is_always_lock_free;
template< bool IsInterprocess >
constexpr bool atomic_flag_impl< IsInterprocess >::always_has_native_wait_notify;
#endif
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
|
||||
1621
include/boost/atomic/detail/atomic_impl.hpp
Normal file
1621
include/boost/atomic/detail/atomic_impl.hpp
Normal file
File diff suppressed because it is too large
Load Diff
1561
include/boost/atomic/detail/atomic_ref_impl.hpp
Normal file
1561
include/boost/atomic/detail/atomic_ref_impl.hpp
Normal file
File diff suppressed because it is too large
Load Diff
171
include/boost/atomic/detail/bitwise_cast.hpp
Normal file
171
include/boost/atomic/detail/bitwise_cast.hpp
Normal file
@@ -0,0 +1,171 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2012 Tim Blechmann
|
||||
* Copyright (c) 2013-2018, 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/bitwise_cast.hpp
|
||||
*
|
||||
* This header defines \c bitwise_cast used to convert between storage and value types
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <type_traits>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/addressof.hpp>
|
||||
#include <boost/atomic/detail/string_ops.hpp>
|
||||
#include <boost/atomic/detail/type_traits/is_trivially_copyable.hpp>
|
||||
#include <boost/atomic/detail/type_traits/has_unique_object_representations.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_NO_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
|
||||
|
||||
#if defined(__has_builtin)
|
||||
#if __has_builtin(__builtin_bit_cast)
|
||||
#define BOOST_ATOMIC_DETAIL_BIT_CAST(x, y) __builtin_bit_cast(x, y)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_BIT_CAST) && defined(BOOST_MSVC) && BOOST_MSVC >= 1926
|
||||
#define BOOST_ATOMIC_DETAIL_BIT_CAST(x, y) __builtin_bit_cast(x, y)
|
||||
#endif
|
||||
|
||||
#endif // !defined(BOOST_ATOMIC_DETAIL_NO_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_BIT_CAST) || !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
|
||||
#define BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_BITWISE_CAST
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_BITWISE_CAST)
|
||||
#define BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST constexpr
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST
|
||||
#endif
|
||||
|
||||
#if defined(BOOST_GCC) && BOOST_GCC >= 80000
|
||||
#pragma GCC diagnostic push
|
||||
// copying an object of non-trivial type X from an array of Y. This is benign because we use memcpy to copy trivially copyable objects.
|
||||
#pragma GCC diagnostic ignored "-Wclass-memaccess"
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
template< std::size_t ValueSize, typename To >
|
||||
BOOST_FORCEINLINE void clear_tail_padding_bits(To& to, std::true_type) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_MEMSET(reinterpret_cast< unsigned char* >(atomics::detail::addressof(to)) + ValueSize, 0, sizeof(To) - ValueSize);
|
||||
}
|
||||
|
||||
template< std::size_t ValueSize, typename To >
|
||||
BOOST_FORCEINLINE void clear_tail_padding_bits(To&, std::false_type) noexcept
|
||||
{
|
||||
}
|
||||
|
||||
template< std::size_t ValueSize, typename To >
|
||||
BOOST_FORCEINLINE void clear_tail_padding_bits(To& to) noexcept
|
||||
{
|
||||
atomics::detail::clear_tail_padding_bits< ValueSize >(to, std::integral_constant< bool, ValueSize < sizeof(To) >());
|
||||
}
|
||||
|
||||
//! Converts \a from to type \c To by copying its object representation with memcpy,
//! truncating or zero-padding on the right to \c sizeof(To); clears tail padding in the result.
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE To bitwise_cast_memcpy(From const& from) noexcept
{
    using unqualified_to_t = typename std::remove_cv< To >::type;
    static_assert(atomics::detail::is_trivially_copyable< unqualified_to_t >::value, "bitwise_cast target type must be trivially copyable");
    static_assert(atomics::detail::is_trivially_copyable< From >::value, "bitwise_cast source type must be trivially copyable");

    // Suppress default constructor of To as it may potentially be a non-trivial throwing constructor
    union cast_helper
    {
        unsigned char as_bytes[sizeof(unqualified_to_t)];
        unqualified_to_t as_to;

        BOOST_FORCEINLINE cast_helper() noexcept {}
    }
    storage;

#if !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
    // Copy the source first so its padding bits can be zeroed without mutating the caller's object
    From from2(from);
    BOOST_ATOMIC_DETAIL_CLEAR_PADDING(atomics::detail::addressof(from2));
    BOOST_ATOMIC_DETAIL_MEMCPY
    (
        atomics::detail::addressof(storage.as_to),
        atomics::detail::addressof(from2),
        (FromValueSize < sizeof(unqualified_to_t) ? FromValueSize : sizeof(unqualified_to_t))
    );
#else
    BOOST_ATOMIC_DETAIL_MEMCPY
    (
        atomics::detail::addressof(storage.as_to),
        atomics::detail::addressof(from),
        (FromValueSize < sizeof(unqualified_to_t) ? FromValueSize : sizeof(unqualified_to_t))
    );
#endif
    // Zero any bytes of the result past the copied value bytes
    atomics::detail::clear_tail_padding_bits< FromValueSize >(storage.as_to);
    return storage.as_to;
}
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)

//! Fast path: same-size, padding-free conversion performed with the compiler's bit_cast builtin (constexpr-capable)
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast_impl(From const& from, std::true_type) noexcept
{
    // This implementation is only called when the From type has no padding and From and To have the same size
    return BOOST_ATOMIC_DETAIL_BIT_CAST(typename std::remove_cv< To >::type, from);
}

//! Generic path: sizes differ or From may contain padding; falls back to the memcpy-based conversion
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE To bitwise_cast_impl(From const& from, std::false_type) noexcept
{
    return atomics::detail::bitwise_cast_memcpy< To, FromValueSize >(from);
}

//! Converts \a from to \c To, selecting the bit_cast fast path when sizes match and From has a unique object representation
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast(From const& from) noexcept
{
    return atomics::detail::bitwise_cast_impl< To, FromValueSize >(from, std::integral_constant< bool,
        FromValueSize == sizeof(To) && atomics::detail::has_unique_object_representations< From >::value >());
}

#else // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)

//! No bit_cast builtin available: always use the memcpy-based conversion
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE To bitwise_cast(From const& from) noexcept
{
    return atomics::detail::bitwise_cast_memcpy< To, FromValueSize >(from);
}

#endif // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)

//! Converts the source object to the target type, possibly by padding or truncating it on the right, and clearing any padding bits (if supported by compiler).
//! Preserves value bits unchanged.
template< typename To, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast(From const& from) noexcept
{
    return atomics::detail::bitwise_cast< To, sizeof(From) >(from);
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#if defined(BOOST_GCC) && BOOST_GCC >= 80000
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
|
||||
118
include/boost/atomic/detail/bitwise_fp_cast.hpp
Normal file
118
include/boost/atomic/detail/bitwise_fp_cast.hpp
Normal file
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/bitwise_fp_cast.hpp
|
||||
*
|
||||
* This header defines \c bitwise_fp_cast used to convert between storage and floating point value types
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/float_sizes.hpp>
|
||||
#include <boost/atomic/detail/bitwise_cast.hpp>
|
||||
#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
|
||||
#include <type_traits>
|
||||
#endif
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
/*!
 * \brief The type trait returns the size of the value of the specified floating point type
 *
 * This size may be less than <tt>sizeof(T)</tt> if the implementation uses padding bytes for a particular FP type. This is
 * often the case with 80-bit extended double, which is stored in 12 or 16 initial bytes with tail padding filled with garbage.
 */
template< typename T >
struct value_size_of
{
    // Default: the value occupies the whole object
    static constexpr std::size_t value = sizeof(T);
};

// Per-type overrides supplied by float_sizes.hpp when the actual value size is known to differ from sizeof
#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
template< >
struct value_size_of< float >
{
    static constexpr std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE;
};
#endif

#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
template< >
struct value_size_of< double >
{
    static constexpr std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE;
};
#endif

#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
template< >
struct value_size_of< long double >
{
    static constexpr std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE;
};
#endif

// cv-qualified types have the same value size as the unqualified type
template< typename T >
struct value_size_of< const T > : value_size_of< T > {};

template< typename T >
struct value_size_of< volatile T > : value_size_of< T > {};

template< typename T >
struct value_size_of< const volatile T > : value_size_of< T > {};
|
||||
|
||||
|
||||
#if !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
// BOOST_ATOMIC_DETAIL_CLEAR_PADDING, which is used in bitwise_cast, will clear the tail padding bits in the source object.
// We don't need to specify the actual value size to avoid redundant zeroing of the tail padding.
#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(x) sizeof(x)
#else
// No compiler support for clearing padding: pass the detected value size so bitwise_cast zeroes the tail itself
#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(x) atomics::detail::value_size_of< x >::value
#endif

#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)

//! Similar to bitwise_cast, but either \c From or \c To is expected to be a floating point type. Attempts to detect the actual value size in the source object and considers the rest of the object as padding.
template< typename To, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_fp_cast(From const& from) noexcept
{
    // For floating point types, has_unique_object_representations is typically false even if the type contains no padding bits.
    // Here, we rely on our detection of the actual value size to select constexpr bit_cast implementation when possible. We assume
    // here that floating point value bits are contiguous.
    return atomics::detail::bitwise_cast_impl< To, BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(From) >(from, std::integral_constant< bool,
        atomics::detail::value_size_of< From >::value == sizeof(From) && atomics::detail::value_size_of< From >::value == sizeof(To) >());
}

#else // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)

//! Similar to bitwise_cast, but either \c From or \c To is expected to be a floating point type. Attempts to detect the actual value size in the source object and considers the rest of the object as padding.
template< typename To, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_fp_cast(From const& from) noexcept
{
    return atomics::detail::bitwise_cast< To, BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(From) >(from);
}

#endif // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
|
||||
217
include/boost/atomic/detail/capabilities.hpp
Normal file
217
include/boost/atomic/detail/capabilities.hpp
Normal file
@@ -0,0 +1,217 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/capabilities.hpp
|
||||
*
|
||||
* This header defines core feature capabilities macros.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/platform.hpp>
|
||||
#include <boost/atomic/detail/int_sizes.hpp>
|
||||
#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
|
||||
#include <boost/atomic/detail/float_sizes.hpp>
|
||||
#endif
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
|
||||
#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/caps_)
|
||||
#elif defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
|
||||
#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/caps_arch_)
|
||||
#endif
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_INT128_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT128_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
|
||||
#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_CHAR8_T_LOCK_FREE
|
||||
#define BOOST_ATOMIC_CHAR8_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
|
||||
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
|
||||
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
|
||||
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
|
||||
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
|
||||
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
|
||||
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_INT_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
|
||||
#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
|
||||
|
||||
#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
|
||||
// We store bools in 1-byte storage in all backends
|
||||
#define BOOST_ATOMIC_BOOL_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
|
||||
// atomic_flag uses 4-byte storage
|
||||
#define BOOST_ATOMIC_FLAG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
|
||||
|
||||
#if !defined(BOOST_ATOMIC_FLOAT_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT)
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 2
|
||||
#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 4
|
||||
#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 8
|
||||
#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT <= 16
|
||||
#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_FLOAT_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DOUBLE_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE)
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 2
|
||||
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 4
|
||||
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 8
|
||||
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE <= 16
|
||||
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 2
|
||||
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 4
|
||||
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 8
|
||||
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE <= 16
|
||||
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE 0
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif // !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
|
||||
|
||||
#ifndef BOOST_ATOMIC_THREAD_FENCE
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_SIGNAL_FENCE
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 0
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
|
||||
53
include/boost/atomic/detail/caps_arch_gcc_aarch32.hpp
Normal file
53
include/boost/atomic/detail/caps_arch_gcc_aarch32.hpp
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020, 2022 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_gcc_aarch32.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__ARMEL__) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
|
||||
(defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
|
||||
defined(BOOST_WINDOWS)
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_LITTLE_ENDIAN
|
||||
#elif defined(__ARMEB__) || \
|
||||
defined(__ARM_BIG_ENDIAN) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
|
||||
(defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_BIG_ENDIAN
|
||||
#else
|
||||
#include <boost/predef/other/endian.h>
|
||||
#if BOOST_ENDIAN_LITTLE_BYTE
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_LITTLE_ENDIAN
|
||||
#elif BOOST_ENDIAN_BIG_BYTE
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_BIG_ENDIAN
|
||||
#else
|
||||
#error "Boost.Atomic: Failed to determine AArch32 endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_
|
||||
65
include/boost/atomic/detail/caps_arch_gcc_aarch64.hpp
Normal file
65
include/boost/atomic/detail/caps_arch_gcc_aarch64.hpp
Normal file
@@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020, 2022 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_gcc_aarch64.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__AARCH64EL__) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
|
||||
(defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
|
||||
defined(BOOST_WINDOWS)
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_LITTLE_ENDIAN
|
||||
#elif defined(__AARCH64EB__) || \
|
||||
defined(__ARM_BIG_ENDIAN) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
|
||||
(defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_BIG_ENDIAN
|
||||
#else
|
||||
#include <boost/predef/other/endian.h>
|
||||
#if BOOST_ENDIAN_LITTLE_BYTE
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_LITTLE_ENDIAN
|
||||
#elif BOOST_ENDIAN_BIG_BYTE
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_BIG_ENDIAN
|
||||
#else
|
||||
#error "Boost.Atomic: Failed to determine AArch64 endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__ARM_FEATURE_ATOMICS)
|
||||
// ARMv8.1 added Large System Extensions, which includes cas, swp, and a number of other read-modify-write instructions
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE
|
||||
#endif
|
||||
|
||||
#if defined(__ARM_FEATURE_COMPLEX)
|
||||
// ARMv8.3 added Release Consistency processor consistent (RCpc) memory model, which includes ldapr and similar instructions.
|
||||
// Unfortunately, there seems to be no dedicated __ARM_FEATURE macro for this, so we use __ARM_FEATURE_COMPLEX, which is also defined starting ARMv8.3.
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_
|
||||
34
include/boost/atomic/detail/caps_arch_gcc_alpha.hpp
Normal file
34
include/boost/atomic/detail/caps_arch_gcc_alpha.hpp
Normal file
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_gcc_alpha.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_
|
||||
103
include/boost/atomic/detail/caps_arch_gcc_arm.hpp
Normal file
103
include/boost/atomic/detail/caps_arch_gcc_arm.hpp
Normal file
@@ -0,0 +1,103 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2009 Phil Endecott
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* ARM Code by Phil Endecott, based on other architectures.
|
||||
* Copyright (c) 2014, 2020, 2022 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_gcc_arm.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/platform.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__ARMEL__) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
|
||||
(defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
|
||||
defined(BOOST_WINDOWS)
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_LITTLE_ENDIAN
|
||||
#elif defined(__ARMEB__) || \
|
||||
defined(__ARM_BIG_ENDIAN) || \
|
||||
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
|
||||
(defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_BIG_ENDIAN
|
||||
#else
|
||||
#include <boost/predef/other/endian.h>
|
||||
#if BOOST_ENDIAN_LITTLE_BYTE
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_LITTLE_ENDIAN
|
||||
#elif BOOST_ENDIAN_BIG_BYTE
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_BIG_ENDIAN
|
||||
#else
|
||||
#error "Boost.Atomic: Failed to determine ARM endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH >= 6)
|
||||
|
||||
#if BOOST_ATOMIC_DETAIL_ARM_ARCH > 6
|
||||
// ARMv7 and later have dmb instruction
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1
|
||||
#endif
|
||||
|
||||
#if defined(__ARM_FEATURE_LDREX)
|
||||
|
||||
#if (__ARM_FEATURE_LDREX & 1)
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
|
||||
#endif
|
||||
#if (__ARM_FEATURE_LDREX & 2)
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
|
||||
#endif
|
||||
#if (__ARM_FEATURE_LDREX & 8)
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
|
||||
#endif
|
||||
|
||||
#else // defined(__ARM_FEATURE_LDREX)
|
||||
|
||||
#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
|
||||
|
||||
// ARMv6k and ARMv7 have 8 and 16-bit ldrex/strex variants, but at least GCC 4.7 fails to compile them. GCC 4.9 is known to work.
|
||||
#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
|
||||
#endif
|
||||
|
||||
#if !(((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7M__))
|
||||
// ARMv6k and ARMv7 except ARMv7-M have 64-bit ldrex/strex variants.
|
||||
// Unfortunately, GCC (at least 4.7.3 on Ubuntu) does not allocate register pairs properly when targeting ARMv6k Thumb,
|
||||
// which is required for ldrexd/strexd instructions, so we disable 64-bit support. When targeting ARMv6k ARM
|
||||
// or ARMv7 (both ARM and Thumb 2) it works as expected.
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
|
||||
#endif
|
||||
|
||||
#endif // !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
|
||||
|
||||
#endif // defined(__ARM_FEATURE_LDREX)
|
||||
|
||||
#endif // defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH >= 6)
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#endif
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_
|
||||
55
include/boost/atomic/detail/caps_arch_gcc_ppc.hpp
Normal file
55
include/boost/atomic/detail/caps_arch_gcc_ppc.hpp
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_gcc_ppc.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__POWERPC__) || defined(__PPC__)
|
||||
|
||||
#if defined(_ARCH_PWR8)
|
||||
// Power8 and later architectures have 8 and 16-bit instructions
|
||||
#define BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX
|
||||
#define BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX
|
||||
#endif
|
||||
|
||||
#if defined(__powerpc64__) || defined(__PPC64__)
|
||||
// Power7 and later architectures in 64-bit mode have 64-bit instructions
|
||||
#define BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX
|
||||
#if defined(_ARCH_PWR8)
|
||||
// Power8 also has 128-bit instructions
|
||||
#define BOOST_ATOMIC_DETAIL_PPC_HAS_LQARX_STQCX
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif // defined(__POWERPC__) || defined(__PPC__)
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#endif
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_
|
||||
34
include/boost/atomic/detail/caps_arch_gcc_sparc.hpp
Normal file
34
include/boost/atomic/detail/caps_arch_gcc_sparc.hpp
Normal file
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2010 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_gcc_sparc.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_
|
||||
74
include/boost/atomic/detail/caps_arch_gcc_x86.hpp
Normal file
74
include/boost/atomic/detail/caps_arch_gcc_x86.hpp
Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2012 Tim Blechmann
|
||||
* Copyright (c) 2013 - 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_gcc_x86.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__)
|
||||
|
||||
#if defined(__i386__) &&\
|
||||
(\
|
||||
defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
|
||||
defined(__i586__) || defined(__i686__) || defined(__SSE__)\
|
||||
)
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
|
||||
#endif
|
||||
|
||||
#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
|
||||
#endif
|
||||
|
||||
#if defined(__x86_64__) || defined(__SSE2__)
|
||||
// Use mfence only if SSE2 is available
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
|
||||
#endif
|
||||
|
||||
#else // defined(__GNUC__)
|
||||
|
||||
#if defined(__i386__) && !defined(BOOST_ATOMIC_NO_CMPXCHG8B)
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
|
||||
#endif
|
||||
|
||||
#if defined(__x86_64__) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_NO_MFENCE)
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
|
||||
#endif
|
||||
|
||||
#endif // defined(__GNUC__)
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#endif
|
||||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
|
||||
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
|
||||
#endif
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_
|
||||
34
include/boost/atomic/detail/caps_arch_msvc_arm.hpp
Normal file
34
include/boost/atomic/detail/caps_arch_msvc_arm.hpp
Normal file
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2012 - 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_msvc_arm.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_
|
||||
61
include/boost/atomic/detail/caps_arch_msvc_x86.hpp
Normal file
61
include/boost/atomic/detail/caps_arch_msvc_x86.hpp
Normal file
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2012 - 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_arch_msvc_x86.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(_M_IX86) && _M_IX86 >= 500
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
|
||||
#endif
|
||||
|
||||
#if defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
|
||||
#if defined(__clang__)
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
|
||||
#endif
|
||||
#elif _MSC_VER >= 1500
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
|
||||
// Use mfence only if SSE2 is available
|
||||
#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
|
||||
#if defined(_M_AMD64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#endif
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
|
||||
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_
|
||||
158
include/boost/atomic/detail/caps_gcc_atomic.hpp
Normal file
158
include/boost/atomic/detail/caps_gcc_atomic.hpp
Normal file
@@ -0,0 +1,158 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014, 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_gcc_atomic.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/int_sizes.hpp>
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
|
||||
#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/caps_arch_)
|
||||
#endif
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// Translate type-based lock-free macros to size-based ones
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
|
||||
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 16
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 16
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 16
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
|
||||
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 16
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
// On x86-64, clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16:
|
||||
// https://bugs.llvm.org/show_bug.cgi?id=19149
|
||||
// Another problem exists with gcc 7 and later, as it requires to link with libatomic to use 16-byte intrinsics:
|
||||
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
|
||||
// Both clang and gcc do generate cmpxchg16b for __sync_val_compare_and_swap though.
|
||||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) &&\
|
||||
(\
|
||||
(defined(BOOST_CLANG) && (__clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 5))) ||\
|
||||
(defined(BOOST_GCC) && BOOST_GCC >= 70000)\
|
||||
)
|
||||
#undef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
// On 32-bit x86, there is a clang bug for 64-bit atomics: https://bugs.llvm.org/show_bug.cgi?id=19355. The compiler defines
|
||||
// __GCC_ATOMIC_LLONG_LOCK_FREE to 1 when the target architecture supports 64-bit atomic instructions (i.e. the value should be 2).
|
||||
// Additionally, any clang version requires to link with libatomic for 64-bit __atomic* intrinsics on x86. It does generate
|
||||
// cmpxchg8b for __sync_val_compare_and_swap though.
|
||||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && defined(BOOST_CLANG)
|
||||
#undef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
// Override arch-specific macros if atomic intrinsics provide better guarantees
|
||||
#if !defined(BOOST_ATOMIC_INT128_LOCK_FREE) || (BOOST_ATOMIC_INT128_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE)
|
||||
#undef BOOST_ATOMIC_INT128_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT128_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_INT64_LOCK_FREE) || (BOOST_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE) || (BOOST_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_INT128_LOCK_FREE)
|
||||
#undef BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT128_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_INT32_LOCK_FREE) || (BOOST_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE) || (BOOST_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_INT64_LOCK_FREE)
|
||||
#undef BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_INT16_LOCK_FREE) || (BOOST_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE) || (BOOST_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_INT32_LOCK_FREE)
|
||||
#undef BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_INT8_LOCK_FREE) || (BOOST_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE) || (BOOST_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_INT16_LOCK_FREE)
|
||||
#undef BOOST_ATOMIC_INT8_LOCK_FREE
|
||||
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE
|
||||
#else
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_POINTER_LOCK_FREE) || (BOOST_ATOMIC_POINTER_LOCK_FREE < __GCC_ATOMIC_POINTER_LOCK_FREE)
|
||||
#undef BOOST_ATOMIC_POINTER_LOCK_FREE
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_THREAD_FENCE) || (BOOST_ATOMIC_THREAD_FENCE < 2)
|
||||
#undef BOOST_ATOMIC_THREAD_FENCE
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#endif
|
||||
#if !defined(BOOST_ATOMIC_SIGNAL_FENCE) || (BOOST_ATOMIC_SIGNAL_FENCE < 2)
|
||||
#undef BOOST_ATOMIC_SIGNAL_FENCE
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
54
include/boost/atomic/detail/caps_gcc_sync.hpp
Normal file
54
include/boost/atomic/detail/caps_gcc_sync.hpp
Normal file
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2011 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_gcc_sync.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#endif
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#endif
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#endif
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|
||||
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
|
||||
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
|
||||
#endif
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
|
||||
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
|
||||
35
include/boost/atomic/detail/caps_linux_arm.hpp
Normal file
35
include/boost/atomic/detail/caps_linux_arm.hpp
Normal file
@@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009, 2011 Helge Bahmann
|
||||
* Copyright (c) 2009 Phil Endecott
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Linux-specific code by Phil Endecott
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_linux_arm.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
|
||||
33
include/boost/atomic/detail/caps_windows.hpp
Normal file
33
include/boost/atomic/detail/caps_windows.hpp
Normal file
@@ -0,0 +1,33 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2012 - 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/caps_windows.hpp
|
||||
*
|
||||
* This header defines feature capabilities macros
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
|
||||
50
include/boost/atomic/detail/cas_based_exchange.hpp
Normal file
50
include/boost/atomic/detail/cas_based_exchange.hpp
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/cas_based_exchange.hpp
|
||||
*
|
||||
* This header contains CAS-based implementation of exchange operation.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
template< typename Base >
|
||||
struct cas_based_exchange :
|
||||
public Base
|
||||
{
|
||||
using storage_type = typename Base::storage_type;
|
||||
|
||||
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_val;
|
||||
atomics::detail::non_atomic_load(storage, old_val);
|
||||
while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
|
||||
return old_val;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
|
||||
93
include/boost/atomic/detail/chrono.hpp
Normal file
93
include/boost/atomic/detail/chrono.hpp
Normal file
@@ -0,0 +1,93 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/chrono.hpp
|
||||
*
|
||||
* This header contains \c std::chrono utilities.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CHRONO_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CHRONO_HPP_INCLUDED_
|
||||
|
||||
#include <time.h>
|
||||
#include <chrono>
|
||||
#if !defined(__cpp_lib_chrono) || (__cpp_lib_chrono < 201510l)
|
||||
#include <ratio>
|
||||
#include <type_traits>
|
||||
#endif // !defined(__cpp_lib_chrono) || (__cpp_lib_chrono < 201510l)
|
||||
#if defined(CLOCK_REALTIME)
|
||||
#include <boost/atomic/posix_clock_traits_fwd.hpp>
|
||||
#endif // defined(CLOCK_REALTIME)
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
namespace chrono {
|
||||
|
||||
#if defined(__cpp_lib_chrono) && (__cpp_lib_chrono >= 201510l)
|
||||
|
||||
using std::chrono::ceil;
|
||||
|
||||
#else // defined(__cpp_lib_chrono) && (__cpp_lib_chrono >= 201510l)
|
||||
|
||||
template< typename To, typename Rep, typename Period >
|
||||
inline constexpr To ceil(std::chrono::duration< Rep, Period > from) noexcept
|
||||
{
|
||||
using conv_ratio = std::ratio_divide< Period, typename To::period >;
|
||||
using common_rep = typename std::common_type< Rep, typename To::rep, decltype(conv_ratio::num) >::type;
|
||||
return To(static_cast< typename To::rep >((static_cast< common_rep >(from.count()) * conv_ratio::num) / conv_ratio::den +
|
||||
static_cast< common_rep >(((static_cast< common_rep >(from.count()) * conv_ratio::num) % conv_ratio::den) != static_cast< common_rep >(0))));
|
||||
}
|
||||
|
||||
#endif // defined(__cpp_lib_chrono) && (__cpp_lib_chrono >= 201510l)
|
||||
|
||||
} // namespace chrono
|
||||
} // namespace detail
|
||||
|
||||
#if defined(CLOCK_REALTIME)
|
||||
|
||||
//! Integrate `std::chrono::system_clock` with POSIX clocks
|
||||
template< >
|
||||
struct posix_clock_traits< std::chrono::system_clock >
|
||||
{
|
||||
//! POSIX clock identifier
|
||||
static constexpr clockid_t clock_id = CLOCK_REALTIME;
|
||||
|
||||
//! Function that converts a time point to a timespec structure
|
||||
static timespec to_timespec(std::chrono::system_clock::time_point time_point) noexcept
|
||||
{
|
||||
timespec ts{};
|
||||
std::chrono::nanoseconds::rep time_ns = std::chrono::duration_cast< std::chrono::nanoseconds >(time_point.time_since_epoch()).count();
|
||||
// Note: The standard doesn't require that std::chrono::system_clock epoch matches the POSIX CLOCK_REALTIME epoch. Also, std::chrono::system_clock::to_time_t
|
||||
// is allowed to round or truncate the time point when converting to time_t resolution, which means to_time_t may return a time before or after time_point.
|
||||
ts.tv_sec = std::chrono::system_clock::to_time_t(std::chrono::system_clock::time_point()) + static_cast< decltype(ts.tv_sec) >(time_ns / 1000000000);
|
||||
time_ns %= 1000000000;
|
||||
if (BOOST_UNLIKELY(time_ns < 0))
|
||||
{
|
||||
--ts.tv_sec;
|
||||
time_ns += 1000000000;
|
||||
}
|
||||
ts.tv_nsec = static_cast< decltype(ts.tv_nsec) >(time_ns);
|
||||
return ts;
|
||||
}
|
||||
};
|
||||
|
||||
#endif // defined(CLOCK_REALTIME)
|
||||
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CHRONO_HPP_INCLUDED_
|
||||
89
include/boost/atomic/detail/classify.hpp
Normal file
89
include/boost/atomic/detail/classify.hpp
Normal file
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/classify.hpp
|
||||
*
|
||||
* This header contains type traits for type classification.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
|
||||
|
||||
#include <type_traits>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/type_traits/is_integral.hpp>
|
||||
#include <boost/atomic/detail/type_traits/is_floating_point.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
template< typename T, bool IsFunction = std::is_function< T >::value >
|
||||
struct classify_pointer
|
||||
{
|
||||
using type = void*;
|
||||
};
|
||||
|
||||
template< typename T >
|
||||
struct classify_pointer< T, true >
|
||||
{
|
||||
using type = void;
|
||||
};
|
||||
|
||||
template<
|
||||
typename T,
|
||||
bool IsInt = atomics::detail::is_integral< T >::value,
|
||||
bool IsFloat = atomics::detail::is_floating_point< T >::value,
|
||||
bool IsEnum = std::is_enum< T >::value
|
||||
>
|
||||
struct classify
|
||||
{
|
||||
using type = void;
|
||||
};
|
||||
|
||||
template< typename T >
|
||||
struct classify< T, true, false, false > { using type = int; };
|
||||
|
||||
#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
|
||||
template< typename T >
|
||||
struct classify< T, false, true, false > { using type = float; };
|
||||
#endif
|
||||
|
||||
template< typename T >
|
||||
struct classify< T, false, false, true > { using type = const int; };
|
||||
|
||||
template< typename T >
|
||||
struct classify< T*, false, false, false > { using type = typename classify_pointer< T >::type; };
|
||||
|
||||
template< >
|
||||
struct classify< void*, false, false, false > { using type = void; };
|
||||
|
||||
template< >
|
||||
struct classify< const void*, false, false, false > { using type = void; };
|
||||
|
||||
template< >
|
||||
struct classify< volatile void*, false, false, false > { using type = void; };
|
||||
|
||||
template< >
|
||||
struct classify< const volatile void*, false, false, false > { using type = void; };
|
||||
|
||||
template< typename T, typename U >
|
||||
struct classify< T U::*, false, false, false > { using type = void; };
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
|
||||
134
include/boost/atomic/detail/config.hpp
Normal file
134
include/boost/atomic/detail/config.hpp
Normal file
@@ -0,0 +1,134 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2012 Hartmut Kaiser
|
||||
* Copyright (c) 2014-2018, 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/config.hpp
|
||||
*
|
||||
* This header defines configuraion macros for Boost.Atomic
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
|
||||
|
||||
#include <boost/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__SANITIZE_THREAD__)
|
||||
#define BOOST_ATOMIC_DETAIL_TSAN
|
||||
#elif defined(__has_feature)
|
||||
#if __has_feature(thread_sanitizer)
|
||||
#define BOOST_ATOMIC_DETAIL_TSAN
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Instrumentation macros to make TSan aware of the memory order semantics of asm blocks
|
||||
#if defined(BOOST_ATOMIC_DETAIL_TSAN)
|
||||
extern "C" {
|
||||
void __tsan_acquire(void*);
|
||||
void __tsan_release(void*);
|
||||
} // extern "C"
|
||||
#define BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(ptr, mo) \
|
||||
{ if ((static_cast< unsigned int >(mo) & static_cast< unsigned int >(memory_order_acquire)) != 0u) __tsan_acquire((void*)(ptr)); }
|
||||
#define BOOST_ATOMIC_DETAIL_TSAN_RELEASE(ptr, mo) \
|
||||
{ if ((static_cast< unsigned int >(mo) & static_cast< unsigned int >(memory_order_release)) != 0u) __tsan_release((void*)(ptr)); }
|
||||
#else // defined(BOOST_ATOMIC_DETAIL_TSAN)
|
||||
#define BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(ptr, mo)
|
||||
#define BOOST_ATOMIC_DETAIL_TSAN_RELEASE(ptr, mo)
|
||||
#endif // defined(BOOST_ATOMIC_DETAIL_TSAN)
|
||||
|
||||
#if defined(__CUDACC__)
|
||||
// nvcc does not support alternatives ("q,m") in asm statement constraints
|
||||
#define BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES
|
||||
// nvcc does not support condition code register ("cc") clobber in asm statements
|
||||
#define BOOST_ATOMIC_DETAIL_NO_ASM_CLOBBER_CC
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CLOBBER_CC)
|
||||
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC "cc"
|
||||
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "cc",
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA
|
||||
#endif
|
||||
|
||||
#if (defined(__i386__) || defined(__x86_64__)) && (defined(__clang__) || (defined(BOOST_GCC) && BOOST_GCC < 40500) || defined(__SUNPRO_CC))
|
||||
// This macro indicates that the compiler does not support allocating eax:edx or rax:rdx register pairs ("A") in asm blocks
|
||||
#define BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS
|
||||
#endif
|
||||
|
||||
#if defined(__i386__) && (defined(__PIC__) || defined(__PIE__)) && !(defined(__clang__) || (defined(BOOST_GCC) && BOOST_GCC >= 50100))
|
||||
// This macro indicates that asm blocks should preserve ebx value unchanged. Some compilers are able to maintain ebx themselves
|
||||
// around the asm blocks. For those compilers we don't need to save/restore ebx in asm blocks.
|
||||
#define BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX
|
||||
#endif
|
||||
|
||||
#if defined(BOOST_NO_CXX11_ALIGNAS) ||\
|
||||
(defined(BOOST_GCC) && BOOST_GCC < 40900) ||\
|
||||
(defined(BOOST_MSVC) && BOOST_MSVC < 1910 && defined(_M_IX86))
|
||||
// gcc prior to 4.9 doesn't support alignas with a constant expression as an argument.
|
||||
// MSVC 14.0 does support alignas, but in 32-bit mode emits "error C2719: formal parameter with requested alignment of N won't be aligned" for N > 4,
|
||||
// when aligned types are used in function arguments, even though the std::max_align_t type has alignment of 8.
|
||||
#define BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS
|
||||
#endif
|
||||
|
||||
// Enable pointer/reference casts between storage and value when possible.
|
||||
// Note: Despite that MSVC does not employ strict aliasing rules for optimizations
|
||||
// and does not require an explicit markup for types that may alias, we still don't
|
||||
// enable the optimization for this compiler because at least MSVC-8 and 9 are known
|
||||
// to generate broken code sometimes when casts are used.
|
||||
#define BOOST_ATOMIC_DETAIL_MAY_ALIAS BOOST_MAY_ALIAS
|
||||
#if !defined(BOOST_NO_MAY_ALIAS)
|
||||
#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS
|
||||
#endif
|
||||
|
||||
#if defined(__GCC_ASM_FLAG_OUTPUTS__)
|
||||
// The compiler supports output values in flag registers.
|
||||
// See: https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html, Section 6.44.3.
|
||||
#define BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS
|
||||
#endif
|
||||
|
||||
#if defined(__has_builtin)
|
||||
#if __has_builtin(__builtin_constant_p)
|
||||
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) __builtin_constant_p(x)
|
||||
#endif
|
||||
#if __has_builtin(__builtin_clear_padding)
|
||||
#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_clear_padding(x)
|
||||
#elif __has_builtin(__builtin_zero_non_value_bits)
|
||||
#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_zero_non_value_bits(x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_IS_CONSTANT) && defined(__GNUC__)
|
||||
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) __builtin_constant_p(x)
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_IS_CONSTANT)
|
||||
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) false
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_CLEAR_PADDING) && defined(BOOST_MSVC) && BOOST_MSVC >= 1927
|
||||
// Note that as of MSVC 19.29 this intrinsic does not clear padding in unions:
|
||||
// https://developercommunity.visualstudio.com/t/__builtin_zero_non_value_bits-does-not-c/1551510
|
||||
#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_zero_non_value_bits(x)
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_CLEAR_PADDING)
|
||||
#define BOOST_ATOMIC_NO_CLEAR_PADDING
|
||||
#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x)
|
||||
#endif
|
||||
|
||||
#if (defined(__BYTE_ORDER__) && defined(__FLOAT_WORD_ORDER__) && __BYTE_ORDER__ == __FLOAT_WORD_ORDER__) ||\
|
||||
defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_AMD64) || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC)
|
||||
// This macro indicates that integer and floating point endianness is the same
|
||||
#define BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
|
||||
50
include/boost/atomic/detail/core_arch_operations.hpp
Normal file
50
include/boost/atomic/detail/core_arch_operations.hpp
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_arch_operations.hpp
|
||||
*
|
||||
* This header defines core atomic operations, including the emulated version.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/core_operations_emulated.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/platform.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
|
||||
#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/core_arch_ops_)
|
||||
#endif
|
||||
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Default specialization that falls back to lock-based implementation
|
||||
template< std::size_t Size, bool Signed, bool Interprocess >
|
||||
struct core_arch_operations :
|
||||
public core_operations_emulated< Size, storage_traits< Size >::alignment, Signed, Interprocess >
|
||||
{
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
|
||||
38
include/boost/atomic/detail/core_arch_operations_fwd.hpp
Normal file
38
include/boost/atomic/detail/core_arch_operations_fwd.hpp
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_arch_operations_fwd.hpp
|
||||
*
|
||||
* This header contains forward declaration of the \c core_arch_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
template< std::size_t Size, bool Signed, bool Interprocess >
|
||||
struct core_arch_operations;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
1228
include/boost/atomic/detail/core_arch_ops_gcc_aarch32.hpp
Normal file
1228
include/boost/atomic/detail/core_arch_ops_gcc_aarch32.hpp
Normal file
File diff suppressed because it is too large
Load Diff
2038
include/boost/atomic/detail/core_arch_ops_gcc_aarch64.hpp
Normal file
2038
include/boost/atomic/detail/core_arch_ops_gcc_aarch64.hpp
Normal file
File diff suppressed because it is too large
Load Diff
867
include/boost/atomic/detail/core_arch_ops_gcc_alpha.hpp
Normal file
867
include/boost/atomic/detail/core_arch_ops_gcc_alpha.hpp
Normal file
@@ -0,0 +1,867 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_arch_ops_gcc_alpha.hpp
|
||||
*
|
||||
* This header contains implementation of the \c core_arch_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
/*
|
||||
Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
|
||||
(HP OpenVMS systems documentation) and the Alpha Architecture Reference Manual.
|
||||
*/
|
||||
|
||||
/*
|
||||
NB: The most natural thing would be to write the increment/decrement
|
||||
operators along the following lines:
|
||||
|
||||
__asm__ __volatile__
|
||||
(
|
||||
"1: ldl_l %0,%1 \n"
|
||||
"addl %0,1,%0 \n"
|
||||
"stl_c %0,%1 \n"
|
||||
"beq %0,1b\n"
|
||||
: "=&b" (tmp)
|
||||
: "m" (value)
|
||||
: "cc"
|
||||
);
|
||||
|
||||
However according to the comments on the HP website and matching
|
||||
comments in the Linux kernel sources this defies branch prediction,
|
||||
as the cpu assumes that backward branches are always taken; so
|
||||
instead copy the trick from the Linux kernel, introduce a forward
|
||||
branch and back again.
|
||||
|
||||
I have, however, had a hard time measuring the difference between
|
||||
the two versions in microbenchmarks -- I am leaving it in nevertheless
|
||||
as it apparently does not hurt either.
|
||||
*/
|
||||
|
||||
struct core_arch_operations_gcc_alpha_base
|
||||
{
|
||||
static constexpr bool full_cas_based = false;
|
||||
static constexpr bool is_always_lock_free = true;
|
||||
|
||||
static BOOST_FORCEINLINE void fence_before(memory_order order) noexcept
|
||||
{
|
||||
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
|
||||
__asm__ __volatile__ ("mb" ::: "memory");
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void fence_after(memory_order order) noexcept
|
||||
{
|
||||
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
|
||||
__asm__ __volatile__ ("mb" ::: "memory");
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void fence_after_store(memory_order order) noexcept
|
||||
{
|
||||
if (order == memory_order_seq_cst)
|
||||
__asm__ __volatile__ ("mb" ::: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template< bool Signed, bool Interprocess >
|
||||
struct core_arch_operations< 4u, Signed, Interprocess > :
|
||||
public core_arch_operations_gcc_alpha_base
|
||||
{
|
||||
using storage_type = typename storage_traits< 4u >::type;
|
||||
|
||||
static constexpr std::size_t storage_size = 4u;
|
||||
static constexpr std::size_t storage_alignment = 4u;
|
||||
static constexpr bool is_signed = Signed;
|
||||
static constexpr bool is_interprocess = Interprocess;
|
||||
|
||||
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
fence_before(order);
|
||||
storage = v;
|
||||
fence_after_store(order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
storage_type v = storage;
|
||||
fence_after(order);
|
||||
return v;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type original, tmp;
|
||||
fence_before(order);
|
||||
__asm__ __volatile__
|
||||
(
|
||||
"1:\n\t"
|
||||
"mov %3, %1\n\t"
|
||||
"ldl_l %0, %2\n\t"
|
||||
"stl_c %1, %2\n\t"
|
||||
"beq %1, 2f\n\t"
|
||||
|
||||
".subsection 2\n\t"
|
||||
"2: br 1b\n\t"
|
||||
".previous\n\t"
|
||||
|
||||
: "=&r" (original), // %0
|
||||
"=&r" (tmp) // %1
|
||||
: "m" (storage), // %2
|
||||
"r" (v) // %3
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
fence_after(order);
|
||||
return original;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool compare_exchange_weak(
|
||||
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
|
||||
{
|
||||
fence_before(success_order);
|
||||
int success;
|
||||
storage_type current;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
"1:\n\t"
|
||||
"ldl_l %2, %4\n\t" // current = *(&storage)
|
||||
"cmpeq %2, %0, %3\n\t" // success = current == expected
|
||||
"mov %2, %0\n\t" // expected = current
|
||||
"beq %3, 2f\n\t" // if (success == 0) goto end
|
||||
"stl_c %1, %4\n\t" // storage = desired; desired = store succeeded
|
||||
"mov %1, %3\n\t" // success = desired
|
||||
"2:\n\t"
|
||||
: "+r" (expected), // %0
|
||||
"+r" (desired), // %1
|
||||
"=&r" (current), // %2
|
||||
"=&r" (success) // %3
|
||||
: "m" (storage) // %4
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
if (success)
|
||||
fence_after(success_order);
|
||||
else
|
||||
fence_after(failure_order);
|
||||
return !!success;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool compare_exchange_strong(
|
||||
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
|
||||
{
|
||||
int success;
|
||||
storage_type current, tmp;
|
||||
fence_before(success_order);
|
||||
__asm__ __volatile__
|
||||
(
|
||||
"1:\n\t"
|
||||
"mov %5, %1\n\t" // tmp = desired
|
||||
"ldl_l %2, %4\n\t" // current = *(&storage)
|
||||
"cmpeq %2, %0, %3\n\t" // success = current == expected
|
||||
"mov %2, %0\n\t" // expected = current
|
||||
"beq %3, 2f\n\t" // if (success == 0) goto end
|
||||
"stl_c %1, %4\n\t" // storage = tmp; tmp = store succeeded
|
||||
"beq %1, 3f\n\t" // if (tmp == 0) goto retry
|
||||
"mov %1, %3\n\t" // success = tmp
|
||||
"2:\n\t"
|
||||
|
||||
".subsection 2\n\t"
|
||||
"3: br 1b\n\t"
|
||||
".previous\n\t"
|
||||
|
||||
: "+r" (expected), // %0
|
||||
"=&r" (tmp), // %1
|
||||
"=&r" (current), // %2
|
||||
"=&r" (success) // %3
|
||||
: "m" (storage), // %4
|
||||
"r" (desired) // %5
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
if (success)
|
||||
fence_after(success_order);
|
||||
else
|
||||
fence_after(failure_order);
|
||||
return !!success;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type original, modified;
|
||||
fence_before(order);
|
||||
__asm__ __volatile__
|
||||
(
|
||||
"1:\n\t"
|
||||
"ldl_l %0, %2\n\t"
|
||||
"addl %0, %3, %1\n\t"
|
||||
"stl_c %1, %2\n\t"
|
||||
"beq %1, 2f\n\t"
|
||||
|
||||
".subsection 2\n\t"
|
||||
"2: br 1b\n\t"
|
||||
".previous\n\t"
|
||||
|
||||
: "=&r" (original), // %0
|
||||
"=&r" (modified) // %1
|
||||
: "m" (storage), // %2
|
||||
"r" (v) // %3
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
fence_after(order);
|
||||
return original;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type original, modified;
|
||||
fence_before(order);
|
||||
__asm__ __volatile__
|
||||
(
|
||||
"1:\n\t"
|
||||
"ldl_l %0, %2\n\t"
|
||||
"subl %0, %3, %1\n\t"
|
||||
"stl_c %1, %2\n\t"
|
||||
"beq %1, 2f\n\t"
|
||||
|
||||
".subsection 2\n\t"
|
||||
"2: br 1b\n\t"
|
||||
".previous\n\t"
|
||||
|
||||
: "=&r" (original), // %0
|
||||
"=&r" (modified) // %1
|
||||
: "m" (storage), // %2
|
||||
"r" (v) // %3
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
fence_after(order);
|
||||
return original;
|
||||
}
|
||||
|
||||
// Atomically ANDs v into the 32-bit storage and returns the previous value.
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
{
    storage_type original, modified;
    fence_before(order);
    __asm__ __volatile__
    (
        "1:\n\t"
        "ldl_l %0, %2\n\t"   // original = storage (load-locked)
        "and %0, %3, %1\n\t" // modified = original & v
        "stl_c %1, %2\n\t"   // storage = modified; %1 becomes 0 if the store-conditional failed
        "beq %1, 2f\n\t"     // retry on failure

        ".subsection 2\n\t"
        "2: br 1b\n\t"
        ".previous\n\t"

        : "=&r" (original), // %0
          "=&r" (modified)  // %1
        : "m" (storage),    // %2
          "r" (v)           // %3
        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
    );
    fence_after(order);
    return original;
}
|
||||
|
||||
// Atomically ORs v into the 32-bit storage and returns the previous value.
// Alpha spells bitwise OR as "bis" (bit set).
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
{
    storage_type original, modified;
    fence_before(order);
    __asm__ __volatile__
    (
        "1:\n\t"
        "ldl_l %0, %2\n\t"   // original = storage (load-locked)
        "bis %0, %3, %1\n\t" // modified = original | v
        "stl_c %1, %2\n\t"   // storage = modified; %1 becomes 0 if the store-conditional failed
        "beq %1, 2f\n\t"     // retry on failure

        ".subsection 2\n\t"
        "2: br 1b\n\t"
        ".previous\n\t"

        : "=&r" (original), // %0
          "=&r" (modified)  // %1
        : "m" (storage),    // %2
          "r" (v)           // %3
        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
    );
    fence_after(order);
    return original;
}
|
||||
|
||||
// Atomically XORs v into the 32-bit storage and returns the previous value.
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
{
    storage_type original, modified;
    fence_before(order);
    __asm__ __volatile__
    (
        "1:\n\t"
        "ldl_l %0, %2\n\t"   // original = storage (load-locked)
        "xor %0, %3, %1\n\t" // modified = original ^ v
        "stl_c %1, %2\n\t"   // storage = modified; %1 becomes 0 if the store-conditional failed
        "beq %1, 2f\n\t"     // retry on failure

        ".subsection 2\n\t"
        "2: br 1b\n\t"
        ".previous\n\t"

        : "=&r" (original), // %0
          "=&r" (modified)  // %1
        : "m" (storage),    // %2
          "r" (v)           // %3
        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
    );
    fence_after(order);
    return original;
}
|
||||
|
||||
// Atomically sets the flag to 1 with the given memory ordering and reports
// whether it was already set before the call.
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
{
    // Setting the flag is an exchange with 1; the previous value tells us
    // whether the flag was set already.
    const storage_type previous = exchange(storage, static_cast< storage_type >(1), order);
    return previous != static_cast< storage_type >(0);
}
|
||||
|
||||
// Atomically resets the flag by storing zero with the given memory ordering.
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
{
    store(storage, static_cast< storage_type >(0), order);
}
|
||||
};
|
||||
|
||||
|
||||
// Specialization for 8-bit unsigned values. Reuses the 32-bit operations and
// overrides the arithmetic ops so that the intermediate result is truncated
// to 8 bits before being stored, which emulates unsigned 8-bit wraparound
// within the wider 32-bit storage.
template< bool Interprocess >
struct core_arch_operations< 1u, false, Interprocess > :
    public core_arch_operations< 4u, false, Interprocess >
{
    using base_type = core_arch_operations< 4u, false, Interprocess >;
    using storage_type = typename base_type::storage_type;

    // Atomically adds v; zapnot with mask 1 zeroes all bytes except byte 0,
    // truncating the sum to an unsigned 8-bit value.
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        base_type::fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldl_l %0, %2\n\t"     // original = storage (load-locked)
            "addl %0, %3, %1\n\t"  // modified = original + v
            "zapnot %1, 1, %1\n\t" // keep only the low byte
            "stl_c %1, %2\n\t"     // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"       // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        base_type::fence_after(order);
        return original;
    }

    // Atomically subtracts v, truncating the difference to unsigned 8 bits.
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        base_type::fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldl_l %0, %2\n\t"     // original = storage (load-locked)
            "subl %0, %3, %1\n\t"  // modified = original - v
            "zapnot %1, 1, %1\n\t" // keep only the low byte
            "stl_c %1, %2\n\t"     // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"       // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        base_type::fence_after(order);
        return original;
    }
};
|
||||
|
||||
// Specialization for 8-bit signed values. Same scheme as the unsigned 8-bit
// specialization, but the intermediate result is sign-extended from the low
// byte (sextb), emulating signed 8-bit wraparound in the 32-bit storage.
template< bool Interprocess >
struct core_arch_operations< 1u, true, Interprocess > :
    public core_arch_operations< 4u, true, Interprocess >
{
    using base_type = core_arch_operations< 4u, true, Interprocess >;
    using storage_type = typename base_type::storage_type;

    // Atomically adds v, sign-extending the sum from 8 bits.
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        base_type::fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldl_l %0, %2\n\t"    // original = storage (load-locked)
            "addl %0, %3, %1\n\t" // modified = original + v
            "sextb %1, %1\n\t"    // sign-extend the low byte
            "stl_c %1, %2\n\t"    // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"      // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        base_type::fence_after(order);
        return original;
    }

    // Atomically subtracts v, sign-extending the difference from 8 bits.
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        base_type::fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldl_l %0, %2\n\t"    // original = storage (load-locked)
            "subl %0, %3, %1\n\t" // modified = original - v
            "sextb %1, %1\n\t"    // sign-extend the low byte
            "stl_c %1, %2\n\t"    // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"      // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        base_type::fence_after(order);
        return original;
    }
};
|
||||
|
||||
|
||||
// Specialization for 16-bit unsigned values. Arithmetic is done on 32-bit
// storage and the result is truncated to the low two bytes (zapnot mask 3),
// emulating unsigned 16-bit wraparound.
template< bool Interprocess >
struct core_arch_operations< 2u, false, Interprocess > :
    public core_arch_operations< 4u, false, Interprocess >
{
    using base_type = core_arch_operations< 4u, false, Interprocess >;
    using storage_type = typename base_type::storage_type;

    // Atomically adds v, truncating the sum to unsigned 16 bits.
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        base_type::fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldl_l %0, %2\n\t"     // original = storage (load-locked)
            "addl %0, %3, %1\n\t"  // modified = original + v
            "zapnot %1, 3, %1\n\t" // keep only bytes 0-1
            "stl_c %1, %2\n\t"     // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"       // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        base_type::fence_after(order);
        return original;
    }

    // Atomically subtracts v, truncating the difference to unsigned 16 bits.
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        base_type::fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldl_l %0, %2\n\t"     // original = storage (load-locked)
            "subl %0, %3, %1\n\t"  // modified = original - v
            "zapnot %1, 3, %1\n\t" // keep only bytes 0-1
            "stl_c %1, %2\n\t"     // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"       // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        base_type::fence_after(order);
        return original;
    }
};
|
||||
|
||||
// Specialization for 16-bit signed values. The intermediate result is
// sign-extended from the low word (sextw), emulating signed 16-bit wraparound
// within the 32-bit storage.
template< bool Interprocess >
struct core_arch_operations< 2u, true, Interprocess > :
    public core_arch_operations< 4u, true, Interprocess >
{
    using base_type = core_arch_operations< 4u, true, Interprocess >;
    using storage_type = typename base_type::storage_type;

    // Atomically adds v, sign-extending the sum from 16 bits.
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        base_type::fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldl_l %0, %2\n\t"    // original = storage (load-locked)
            "addl %0, %3, %1\n\t" // modified = original + v
            "sextw %1, %1\n\t"    // sign-extend the low word
            "stl_c %1, %2\n\t"    // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"      // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        base_type::fence_after(order);
        return original;
    }

    // Atomically subtracts v, sign-extending the difference from 16 bits.
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        base_type::fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldl_l %0, %2\n\t"    // original = storage (load-locked)
            "subl %0, %3, %1\n\t" // modified = original - v
            "sextw %1, %1\n\t"    // sign-extend the low word
            "stl_c %1, %2\n\t"    // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"      // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        base_type::fence_after(order);
        return original;
    }
};
|
||||
|
||||
|
||||
// 64-bit operations. Same structure as the 32-bit primary implementation but
// uses the quadword LL/SC pair (ldq_l/stq_c) and quadword arithmetic.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
    public core_arch_operations_gcc_alpha_base
{
    using storage_type = typename storage_traits< 8u >::type;

    static constexpr std::size_t storage_size = 8u;
    static constexpr std::size_t storage_alignment = 8u;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;

    // Plain aligned store; atomicity is provided by the hardware, ordering by
    // the surrounding fences.
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        fence_before(order);
        storage = v;
        fence_after_store(order);
    }

    // Plain aligned load followed by the acquire fence, if required.
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = storage;
        fence_after(order);
        return v;
    }

    // Atomically replaces storage with v and returns the previous value.
    // The value is re-copied into tmp on every iteration because stq_c
    // destroys its source register (it becomes the success flag).
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, tmp;
        fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "mov %3, %1\n\t"   // tmp = v
            "ldq_l %0, %2\n\t" // original = storage (load-locked)
            "stq_c %1, %2\n\t" // storage = tmp; %1 becomes 0 on failure
            "beq %1, 2f\n\t"   // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (tmp)       // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        fence_after(order);
        return original;
    }

    // Weak CAS: a spurious stq_c failure is reported as a failed CAS rather
    // than retried, which is permitted for compare_exchange_weak.
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        fence_before(success_order);
        int success;
        storage_type current;
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldq_l %2, %4\n\t"     // current = *(&storage)
            "cmpeq %2, %0, %3\n\t" // success = current == expected
            "mov %2, %0\n\t"       // expected = current
            "beq %3, 2f\n\t"       // if (success == 0) goto end
            "stq_c %1, %4\n\t"     // storage = desired; desired = store succeeded
            "mov %1, %3\n\t"       // success = desired
            "2:\n\t"
            : "+r" (expected), // %0
              "+r" (desired),  // %1
              "=&r" (current), // %2
              "=&r" (success)  // %3
            : "m" (storage)    // %4
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        if (success)
            fence_after(success_order);
        else
            fence_after(failure_order);
        return !!success;
    }

    // Strong CAS: a spurious stq_c failure branches to the out-of-line stub
    // and retries the whole loop, so the operation only fails when the
    // compared values genuinely differ.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        int success;
        storage_type current, tmp;
        fence_before(success_order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "mov %5, %1\n\t"       // tmp = desired
            "ldq_l %2, %4\n\t"     // current = *(&storage)
            "cmpeq %2, %0, %3\n\t" // success = current == expected
            "mov %2, %0\n\t"       // expected = current
            "beq %3, 2f\n\t"       // if (success == 0) goto end
            "stq_c %1, %4\n\t"     // storage = tmp; tmp = store succeeded
            "beq %1, 3f\n\t"       // if (tmp == 0) goto retry
            "mov %1, %3\n\t"       // success = tmp
            "2:\n\t"

            ".subsection 2\n\t"
            "3: br 1b\n\t"
            ".previous\n\t"

            : "+r" (expected), // %0
              "=&r" (tmp),     // %1
              "=&r" (current), // %2
              "=&r" (success)  // %3
            : "m" (storage),   // %4
              "r" (desired)    // %5
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        if (success)
            fence_after(success_order);
        else
            fence_after(failure_order);
        return !!success;
    }

    // Atomically adds v (64-bit addq) and returns the previous value.
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldq_l %0, %2\n\t"    // original = storage (load-locked)
            "addq %0, %3, %1\n\t" // modified = original + v
            "stq_c %1, %2\n\t"    // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"      // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        fence_after(order);
        return original;
    }

    // Atomically subtracts v (64-bit subq) and returns the previous value.
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldq_l %0, %2\n\t"    // original = storage (load-locked)
            "subq %0, %3, %1\n\t" // modified = original - v
            "stq_c %1, %2\n\t"    // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"      // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        fence_after(order);
        return original;
    }

    // Atomically ANDs v into storage and returns the previous value.
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldq_l %0, %2\n\t"   // original = storage (load-locked)
            "and %0, %3, %1\n\t" // modified = original & v
            "stq_c %1, %2\n\t"   // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"     // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        fence_after(order);
        return original;
    }

    // Atomically ORs v into storage and returns the previous value ("bis" is
    // Alpha's bitwise OR).
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldq_l %0, %2\n\t"   // original = storage (load-locked)
            "bis %0, %3, %1\n\t" // modified = original | v
            "stq_c %1, %2\n\t"   // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"     // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        fence_after(order);
        return original;
    }

    // Atomically XORs v into storage and returns the previous value.
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type original, modified;
        fence_before(order);
        __asm__ __volatile__
        (
            "1:\n\t"
            "ldq_l %0, %2\n\t"   // original = storage (load-locked)
            "xor %0, %3, %1\n\t" // modified = original ^ v
            "stq_c %1, %2\n\t"   // storage = modified; %1 becomes 0 on failure
            "beq %1, 2f\n\t"     // retry on failure

            ".subsection 2\n\t"
            "2: br 1b\n\t"
            ".previous\n\t"

            : "=&r" (original), // %0
              "=&r" (modified)  // %1
            : "m" (storage),    // %2
              "r" (v)           // %3
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        fence_after(order);
        return original;
    }

    // Sets the flag via exchange with 1; returns the previous flag state.
    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
    {
        return !!exchange(storage, (storage_type)1, order);
    }

    // Clears the flag with a plain atomic store of zero.
    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
    {
        store(storage, (storage_type)0, order);
    }
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
|
||||
1417
include/boost/atomic/detail/core_arch_ops_gcc_arm.hpp
Normal file
1417
include/boost/atomic/detail/core_arch_ops_gcc_arm.hpp
Normal file
File diff suppressed because it is too large
Load Diff
1346
include/boost/atomic/detail/core_arch_ops_gcc_ppc.hpp
Normal file
1346
include/boost/atomic/detail/core_arch_ops_gcc_ppc.hpp
Normal file
File diff suppressed because it is too large
Load Diff
215
include/boost/atomic/detail/core_arch_ops_gcc_sparc.hpp
Normal file
215
include/boost/atomic/detail/core_arch_ops_gcc_sparc.hpp
Normal file
@@ -0,0 +1,215 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2010 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_arch_ops_gcc_sparc.hpp
|
||||
*
|
||||
* This header contains implementation of the \c core_arch_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/core_ops_cas_based.hpp>
|
||||
#include <boost/atomic/detail/cas_based_exchange.hpp>
|
||||
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
struct gcc_sparc_cas_base
|
||||
{
|
||||
static constexpr bool full_cas_based = true;
|
||||
static constexpr bool is_always_lock_free = true;
|
||||
|
||||
static BOOST_FORCEINLINE void fence_before(memory_order order) noexcept
|
||||
{
|
||||
if (order == memory_order_seq_cst)
|
||||
__asm__ __volatile__ ("membar #Sync" ::: "memory");
|
||||
else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
|
||||
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void fence_after(memory_order order) noexcept
|
||||
{
|
||||
if (order == memory_order_seq_cst)
|
||||
__asm__ __volatile__ ("membar #Sync" ::: "memory");
|
||||
else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
|
||||
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void fence_after_store(memory_order order) noexcept
|
||||
{
|
||||
if (order == memory_order_seq_cst)
|
||||
__asm__ __volatile__ ("membar #Sync" ::: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
// 32-bit SPARC operations built around the "cas" instruction.
template< bool Signed, bool Interprocess >
struct gcc_sparc_cas32 :
    public gcc_sparc_cas_base
{
    using storage_type = typename storage_traits< 4u >::type;

    static constexpr std::size_t storage_size = 4u;
    static constexpr std::size_t storage_alignment = 4u;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;

    // Plain aligned store; ordering comes from the surrounding fences.
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        fence_before(order);
        storage = v;
        fence_after_store(order);
    }

    // Plain aligned load followed by the acquire fence, if required.
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = storage;
        fence_after(order);
        return v;
    }

    // CAS via the "cas" instruction: compares [&storage] with previous and,
    // if equal, stores desired; in either case the register holding desired
    // receives the old memory value, so equality of desired and previous
    // afterwards signals success.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        fence_before(success_order);
        storage_type previous = expected;
        __asm__ __volatile__
        (
            "cas [%1], %2, %0"
            : "+r" (desired)
            : "r" (&storage), "r" (previous)
            : "memory"
        );
        const bool success = (desired == previous);
        if (success)
            fence_after(success_order);
        else
            fence_after(failure_order);
        expected = desired;
        return success;
    }

    // The cas instruction never fails spuriously, so weak CAS is the same as
    // strong CAS.
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    // Atomic exchange via the "swap" instruction; v receives the old value.
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        fence_before(order);
        __asm__ __volatile__
        (
            "swap [%1], %0"
            : "+r" (v)
            : "r" (&storage)
            : "memory"
        );
        fence_after(order);
        return v;
    }
};
|
||||
|
||||
// 32-bit operations: derive the full operation set (fetch_add etc.) from the
// CAS/exchange primitives.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 4u, Signed, Interprocess > :
    public core_operations_cas_based< gcc_sparc_cas32< Signed, Interprocess > >
{
};
|
||||
|
||||
// 8-bit operations: emulated on top of the 32-bit CAS with sign/zero
// extension of the arithmetic results.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
|
||||
|
||||
// 16-bit operations: emulated on top of the 32-bit CAS with sign/zero
// extension of the arithmetic results.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
|
||||
|
||||
// 64-bit SPARC operations built around the "casx" instruction. There is no
// 64-bit swap instruction, so exchange is synthesized from CAS by the
// cas_based_exchange adapter in the core_arch_operations specialization.
template< bool Signed, bool Interprocess >
struct gcc_sparc_cas64 :
    public gcc_sparc_cas_base
{
    using storage_type = typename storage_traits< 8u >::type;

    static constexpr std::size_t storage_size = 8u;
    static constexpr std::size_t storage_alignment = 8u;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;

    // Plain aligned store; ordering comes from the surrounding fences.
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        fence_before(order);
        storage = v;
        fence_after_store(order);
    }

    // Plain aligned load followed by the acquire fence, if required.
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = storage;
        fence_after(order);
        return v;
    }

    // 64-bit CAS via "casx"; same register protocol as the 32-bit "cas":
    // the desired register receives the old memory value.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        fence_before(success_order);
        storage_type previous = expected;
        __asm__ __volatile__
        (
            "casx [%1], %2, %0"
            : "+r" (desired)
            : "r" (&storage), "r" (previous)
            : "memory"
        );
        const bool success = (desired == previous);
        if (success)
            fence_after(success_order);
        else
            fence_after(failure_order);
        expected = desired;
        return success;
    }

    // casx never fails spuriously, so weak CAS equals strong CAS.
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }
};
|
||||
|
||||
// 64-bit operations: exchange is synthesized from casx (cas_based_exchange),
// and the remaining operations are derived from CAS/exchange.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
    public core_operations_cas_based< cas_based_exchange< gcc_sparc_cas64< Signed, Interprocess > > >
{
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
|
||||
1177
include/boost/atomic/detail/core_arch_ops_gcc_x86.hpp
Normal file
1177
include/boost/atomic/detail/core_arch_ops_gcc_x86.hpp
Normal file
File diff suppressed because it is too large
Load Diff
828
include/boost/atomic/detail/core_arch_ops_msvc_arm.hpp
Normal file
828
include/boost/atomic/detail/core_arch_ops_msvc_arm.hpp
Normal file
@@ -0,0 +1,828 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2012 Tim Blechmann
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_arch_ops_msvc_arm.hpp
|
||||
*
|
||||
* This header contains implementation of the \c core_arch_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/interlocked.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/type_traits/make_signed.hpp>
|
||||
#include <boost/atomic/detail/ops_msvc_common.hpp>
|
||||
#include <boost/atomic/detail/fence_arch_operations.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
extern "C" {
|
||||
__int8 __iso_volatile_load8(const volatile __int8*);
|
||||
__int16 __iso_volatile_load16(const volatile __int16*);
|
||||
__int32 __iso_volatile_load32(const volatile __int32*);
|
||||
__int64 __iso_volatile_load64(const volatile __int64*);
|
||||
void __iso_volatile_store8(volatile __int8*, __int8);
|
||||
void __iso_volatile_store16(volatile __int16*, __int16);
|
||||
void __iso_volatile_store32(volatile __int32*, __int32);
|
||||
void __iso_volatile_store64(volatile __int64*, __int64);
|
||||
}
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(__iso_volatile_load8)
|
||||
#pragma intrinsic(__iso_volatile_load16)
|
||||
#pragma intrinsic(__iso_volatile_load32)
|
||||
#pragma intrinsic(__iso_volatile_load64)
|
||||
#pragma intrinsic(__iso_volatile_store8)
|
||||
#pragma intrinsic(__iso_volatile_store16)
|
||||
#pragma intrinsic(__iso_volatile_store32)
|
||||
#pragma intrinsic(__iso_volatile_store64)
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p))
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p))
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p))
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_LOAD64(p) __iso_volatile_load64((const volatile __int64*)(p))
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_STORE8(p, v) __iso_volatile_store8((volatile __int8*)(p), (__int8)(v))
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_STORE16(p, v) __iso_volatile_store16((volatile __int16*)(p), (__int16)(v))
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_STORE32(p, v) __iso_volatile_store32((volatile __int32*)(p), (__int32)(v))
|
||||
#define BOOST_ATOMIC_DETAIL_ARM_STORE64(p, v) __iso_volatile_store64((volatile __int64*)(p), (__int64)(v))
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// A note about memory_order_consume. Technically, this architecture allows to avoid
|
||||
// unnecessary memory barrier after consume load since it supports data dependency ordering.
|
||||
// However, some compiler optimizations may break a seemingly valid code relying on data
|
||||
// dependency tracking by injecting bogus branches to aid out of order execution.
|
||||
// This may happen not only in Boost.Atomic code but also in user's code, which we have no
|
||||
// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
|
||||
// For this reason we promote memory_order_consume to memory_order_acquire.
|
||||
|
||||
// Base class providing memory fences for the MSVC/ARM backend. Hardware
// fences are issued via fence_arch_operations::hardware_full_fence; compiler
// barriers bracket every fence to keep MSVC from reordering across it.
struct core_arch_operations_msvc_arm_base
{
    static constexpr bool full_cas_based = false;
    static constexpr bool is_always_lock_free = true;

    // Fence issued before a store: full hardware fence for release (and
    // stronger) orderings.
    static BOOST_FORCEINLINE void fence_before_store(memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            fence_arch_operations::hardware_full_fence();

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    // Fence issued after a store: only seq_cst needs the trailing full fence.
    static BOOST_FORCEINLINE void fence_after_store(memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        if (order == memory_order_seq_cst)
            fence_arch_operations::hardware_full_fence();

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    // Fence issued after a load: full hardware fence for consume/acquire (and
    // stronger); consume is treated as acquire (see the note above about
    // promoting memory_order_consume).
    static BOOST_FORCEINLINE void fence_after_load(memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
            fence_arch_operations::hardware_full_fence();

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    // Combines the success and failure orderings of a CAS into the single
    // ordering used for the interlocked intrinsic. The left shift maps the
    // consume bit onto the acquire bit (assumes the memory_order enum encodes
    // acquire as consume << 1 — this relies on Boost's memory_order values).
    static BOOST_FORCEINLINE constexpr memory_order cas_common_order(memory_order success_order, memory_order failure_order) noexcept
    {
        // Combine order flags together and promote memory_order_consume to memory_order_acquire
        return static_cast< memory_order >(((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & ~static_cast< unsigned int >(memory_order_consume))
            | (((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & static_cast< unsigned int >(memory_order_consume)) << 1u));
    }
};
|
||||
|
||||
//! Operations common to all storage sizes, expressed in terms of the size-specific
//! operations supplied by \c Derived (CRTP). The size-specific specializations of
//! \c core_arch_operations below derive from this template and pass themselves as
//! \c Derived.
template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
struct core_arch_operations_msvc_arm :
    public core_arch_operations_msvc_arm_base
{
    using storage_type = typename storage_traits< Size >::type;

    static constexpr std::size_t storage_size = Size;
    static constexpr std::size_t storage_alignment = storage_traits< Size >::alignment;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;

    //! Atomic subtraction, implemented as fetch_add of the negated operand.
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        // Negate in the signed counterpart type and convert back to storage_type
        using signed_storage_type = typename atomics::detail::make_signed< storage_type >::type;
        return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
    }

    //! Weak CAS; this backend has no cheaper weak form, so it forwards to the strong CAS.
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    //! Sets the storage to 1 and returns whether it was previously non-zero.
    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
    {
        return !!Derived::exchange(storage, (storage_type)1, order);
    }

    //! Resets the storage to 0.
    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
    {
        Derived::store(storage, (storage_type)0, order);
    }
};
|
||||
|
||||
//! 8-bit atomic operations. Loads and stores use the plain ARM load/store intrinsic
//! macros bracketed by the base class fences; read-modify-write operations dispatch
//! to the MSVC interlocked intrinsic variant matching the requested memory order.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
    public core_arch_operations_msvc_arm< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_arm< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    //! Plain store bracketed by the fences mandated by \a order
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before_store(order);
        BOOST_ATOMIC_DETAIL_ARM_STORE8(&storage, v);
        base_type::fence_after_store(order);
    }

    //! Plain load followed by the fence mandated by \a order
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD8(&storage);
        base_type::fence_after_load(order);
        return v;
    }

    //! Atomic addition; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        // Select the intrinsic variant with the weakest ordering that satisfies the request
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic exchange; returns the previous value
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
            break;
        }
        return v;
    }

    //! Strong CAS. On failure \a expected is updated with the observed value.
    //! Returns \c true if the exchange was performed.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        storage_type previous = expected, old_val;

        // The intrinsic only takes a single ordering, so combine success/failure orders
        switch (base_type::cas_common_order(success_order, failure_order))
        {
        case memory_order_relaxed:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(&storage, desired, previous));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(&storage, desired, previous));
            break;
        case memory_order_release:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(&storage, desired, previous));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
            break;
        }
        expected = old_val;

        return (previous == old_val);
    }

    //! Atomic bitwise AND; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic bitwise OR; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic bitwise XOR; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
            break;
        }
        return v;
    }
};
|
||||
|
||||
//! 16-bit atomic operations; identical structure to the 8-bit specialization, using
//! the 16-bit ARM load/store and interlocked intrinsic macros.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
    public core_arch_operations_msvc_arm< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_arm< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    //! Plain store bracketed by the fences mandated by \a order
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before_store(order);
        BOOST_ATOMIC_DETAIL_ARM_STORE16(&storage, v);
        base_type::fence_after_store(order);
    }

    //! Plain load followed by the fence mandated by \a order
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD16(&storage);
        base_type::fence_after_load(order);
        return v;
    }

    //! Atomic addition; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        // Select the intrinsic variant with the weakest ordering that satisfies the request
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic exchange; returns the previous value
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
            break;
        }
        return v;
    }

    //! Strong CAS. On failure \a expected is updated with the observed value.
    //! Returns \c true if the exchange was performed.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        storage_type previous = expected, old_val;

        // The intrinsic only takes a single ordering, so combine success/failure orders
        switch (base_type::cas_common_order(success_order, failure_order))
        {
        case memory_order_relaxed:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(&storage, desired, previous));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(&storage, desired, previous));
            break;
        case memory_order_release:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(&storage, desired, previous));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
            break;
        }
        expected = old_val;

        return (previous == old_val);
    }

    //! Atomic bitwise AND; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic bitwise OR; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic bitwise XOR; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
            break;
        }
        return v;
    }
};
|
||||
|
||||
//! 32-bit atomic operations; identical structure to the 8-bit specialization. The
//! 32-bit interlocked intrinsic macros carry no size suffix.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 4u, Signed, Interprocess > :
    public core_arch_operations_msvc_arm< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_arm< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    //! Plain store bracketed by the fences mandated by \a order
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before_store(order);
        BOOST_ATOMIC_DETAIL_ARM_STORE32(&storage, v);
        base_type::fence_after_store(order);
    }

    //! Plain load followed by the fence mandated by \a order
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD32(&storage);
        base_type::fence_after_load(order);
        return v;
    }

    //! Atomic addition; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        // Select the intrinsic variant with the weakest ordering that satisfies the request
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic exchange; returns the previous value
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
            break;
        }
        return v;
    }

    //! Strong CAS. On failure \a expected is updated with the observed value.
    //! Returns \c true if the exchange was performed.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        storage_type previous = expected, old_val;

        // The intrinsic only takes a single ordering, so combine success/failure orders
        switch (base_type::cas_common_order(success_order, failure_order))
        {
        case memory_order_relaxed:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(&storage, desired, previous));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(&storage, desired, previous));
            break;
        case memory_order_release:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(&storage, desired, previous));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
            break;
        }
        expected = old_val;

        return (previous == old_val);
    }

    //! Atomic bitwise AND; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic bitwise OR; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic bitwise XOR; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
            break;
        }
        return v;
    }
};
|
||||
|
||||
//! 64-bit atomic operations; identical structure to the 8-bit specialization, using
//! the 64-bit ARM load/store and interlocked intrinsic macros.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
    public core_arch_operations_msvc_arm< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_arm< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    //! Plain store bracketed by the fences mandated by \a order
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before_store(order);
        BOOST_ATOMIC_DETAIL_ARM_STORE64(&storage, v);
        base_type::fence_after_store(order);
    }

    //! Plain load followed by the fence mandated by \a order
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD64(&storage);
        base_type::fence_after_load(order);
        return v;
    }

    //! Atomic addition; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        // Select the intrinsic variant with the weakest ordering that satisfies the request
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic exchange; returns the previous value
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
            break;
        }
        return v;
    }

    //! Strong CAS. On failure \a expected is updated with the observed value.
    //! Returns \c true if the exchange was performed.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        storage_type previous = expected, old_val;

        // The intrinsic only takes a single ordering, so combine success/failure orders
        switch (base_type::cas_common_order(success_order, failure_order))
        {
        case memory_order_relaxed:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(&storage, desired, previous));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(&storage, desired, previous));
            break;
        case memory_order_release:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(&storage, desired, previous));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
            break;
        }
        expected = old_val;

        return (previous == old_val);
    }

    //! Atomic bitwise AND; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic bitwise OR; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
            break;
        }
        return v;
    }

    //! Atomic bitwise XOR; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        switch (order)
        {
        case memory_order_relaxed:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(&storage, v));
            break;
        case memory_order_consume:
        case memory_order_acquire:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(&storage, v));
            break;
        case memory_order_release:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(&storage, v));
            break;
        case memory_order_acq_rel:
        case memory_order_seq_cst:
        default:
            v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
            break;
        }
        return v;
    }
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD8
|
||||
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD16
|
||||
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD32
|
||||
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD64
|
||||
#undef BOOST_ATOMIC_DETAIL_ARM_STORE8
|
||||
#undef BOOST_ATOMIC_DETAIL_ARM_STORE16
|
||||
#undef BOOST_ATOMIC_DETAIL_ARM_STORE32
|
||||
#undef BOOST_ATOMIC_DETAIL_ARM_STORE64
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
902
include/boost/atomic/detail/core_arch_ops_msvc_x86.hpp
Normal file
902
include/boost/atomic/detail/core_arch_ops_msvc_x86.hpp
Normal file
@@ -0,0 +1,902 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2012 Tim Blechmann
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_arch_ops_msvc_x86.hpp
|
||||
*
|
||||
* This header contains implementation of the \c core_arch_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/intptr.hpp>
|
||||
#include <boost/atomic/detail/interlocked.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/type_traits/make_signed.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
|
||||
#include <boost/atomic/detail/cas_based_exchange.hpp>
|
||||
#include <boost/atomic/detail/core_ops_cas_based.hpp>
|
||||
#endif
|
||||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && defined(__AVX__)
|
||||
#include <emmintrin.h>
|
||||
#include <boost/atomic/detail/string_ops.hpp>
|
||||
#endif
|
||||
#include <boost/atomic/detail/ops_msvc_common.hpp>
|
||||
#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16))
|
||||
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
|
||||
#endif
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
/*
|
||||
* Implementation note for asm blocks.
|
||||
*
|
||||
* http://msdn.microsoft.com/en-us/data/k1a8ss06%28v=vs.105%29
|
||||
*
|
||||
* Some SSE types require eight-byte stack alignment, forcing the compiler to emit dynamic stack-alignment code.
|
||||
* To be able to access both the local variables and the function parameters after the alignment, the compiler
|
||||
* maintains two frame pointers. If the compiler performs frame pointer omission (FPO), it will use EBP and ESP.
|
||||
* If the compiler does not perform FPO, it will use EBX and EBP. To ensure code runs correctly, do not modify EBX
|
||||
* in asm code if the function requires dynamic stack alignment as it could modify the frame pointer.
|
||||
* Either move the eight-byte aligned types out of the function, or avoid using EBX.
|
||||
*
|
||||
* Since we have no way of knowing that the compiler uses FPO, we have to always save and restore ebx
|
||||
* whenever we have to clobber it. Additionally, we disable warning C4731 in header.hpp so that the compiler
|
||||
* doesn't spam about ebx use.
|
||||
*/
|
||||
|
||||
//! Common base for MSVC x86/x86_64 core atomic operations. The fences below only
//! restrain the compiler; no hardware barrier instructions are emitted (see the
//! note in fence_after_load for why this is sufficient on these architectures).
struct core_arch_operations_msvc_x86_base
{
    static constexpr bool full_cas_based = false;
    static constexpr bool is_always_lock_free = true;

    //! Compiler-only barrier emitted before an atomic access
    static BOOST_FORCEINLINE void fence_before(memory_order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    //! Compiler-only barrier emitted after an atomic access
    static BOOST_FORCEINLINE void fence_after(memory_order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    //! Compiler-only barrier emitted after an atomic load
    static BOOST_FORCEINLINE void fence_after_load(memory_order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        // On x86 and x86_64 there is no need for a hardware barrier,
        // even if seq_cst memory order is requested, because all
        // seq_cst writes are implemented with lock-prefixed operations
        // or xchg which has implied lock prefix. Therefore normal loads
        // are already ordered with seq_cst stores on these architectures.
    }
};
|
||||
|
||||
//! CRTP base class providing the operations that can be expressed generically
//! in terms of the size-specific primitives supplied by \c Derived
//! (exchange, fetch_add, compare_exchange_strong).
template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
struct core_arch_operations_msvc_x86 :
    public core_arch_operations_msvc_x86_base
{
    using storage_type = typename storage_traits< Size >::type;

    static constexpr std::size_t storage_size = Size;
    static constexpr std::size_t storage_alignment = storage_traits< Size >::alignment;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;

    //! Atomic store. A plain write between compiler barriers suffices for orders
    //! weaker than seq_cst; a seq_cst store is done via exchange, which uses a
    //! lock-prefixed (or implicitly locked) instruction.
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        if (order != memory_order_seq_cst)
        {
            fence_before(order);
            storage = v;
            fence_after(order);
        }
        else
        {
            Derived::exchange(storage, v, order);
        }
    }

    //! Atomic load; a plain volatile read followed by a compiler barrier
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = storage;
        fence_after_load(order);
        return v;
    }

    //! Atomic subtraction, expressed as addition of the two's complement negation.
    //! The negation is performed on the signed type to avoid unsigned wrap-around pitfalls.
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        using signed_storage_type = typename boost::atomics::detail::make_signed< storage_type >::type;
        return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
    }

    //! Weak CAS; x86 CAS never fails spuriously, so this simply forwards to the strong version
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    //! Sets the storage to 1 and returns whether it was previously nonzero
    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
    {
        return !!Derived::exchange(storage, (storage_type)1, order);
    }

    //! Resets the storage to zero
    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
    {
        store(storage, (storage_type)0, order);
    }
};
|
||||
|
||||
//! 32-bit operations, based on the Interlocked* intrinsics, which are available
//! on all MSVC targets. Memory order arguments are ignored where the intrinsic
//! already provides full (sequentially consistent) semantics.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 4u, Signed, Interprocess > :
    public core_arch_operations_msvc_x86< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_x86< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
    }

    //! Strong CAS; on failure the observed value is written back into \c expected
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        storage_type previous = expected;
        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
        expected = old_val;
        return (previous == old_val);
    }

#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
    }
#else
    //! Fallback when the InterlockedAnd intrinsic is unavailable: emulate with a CAS loop
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type res = storage;
        while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
        return res;
    }
#endif

#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
    }
#else
    //! Fallback when the InterlockedOr intrinsic is unavailable: emulate with a CAS loop
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type res = storage;
        while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
        return res;
    }
#endif

#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
    }
#else
    //! Fallback when the InterlockedXor intrinsic is unavailable: emulate with a CAS loop
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type res = storage;
        while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
        return res;
    }
#endif
};
|
||||
|
||||
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8)
|
||||
|
||||
//! 1-byte operations, used when the 8-bit Interlocked* intrinsics are available
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
    public core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
    }

    //! Strong CAS; on failure the observed value is written back into \c expected
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        storage_type previous = expected;
        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
        expected = old_val;
        return (previous == old_val);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
    }
};
|
||||
|
||||
#elif defined(_M_IX86)
|
||||
|
||||
//! 1-byte operations for 32-bit x86 when the 8-bit Interlocked* intrinsics are
//! not available, implemented with MSVC inline assembly. See the implementation
//! note near the top of this file regarding register usage in asm blocks.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
    public core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    //! Atomic addition via lock xadd; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edx, storage
            movzx eax, v
            lock xadd byte ptr [edx], al
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }

    //! Atomic exchange via xchg (which carries an implied lock prefix)
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edx, storage
            movzx eax, v
            xchg byte ptr [edx], al
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }

    //! Strong CAS via lock cmpxchg; the observed value is stored back into \c expected
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) noexcept
    {
        base_type::fence_before(success_order);
        bool success;
        __asm
        {
            mov esi, expected
            mov edi, storage
            movzx eax, byte ptr [esi]
            movzx edx, desired
            lock cmpxchg byte ptr [edi], dl
            mov byte ptr [esi], al
            sete success
        };
        // The success and failure fences are equivalent anyway
        base_type::fence_after(success_order);
        return success;
    }

    //! Atomic AND, implemented as a cmpxchg retry loop; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, byte ptr [edi]
            align 16
        again:
            mov dl, al
            and dl, cl
            lock cmpxchg byte ptr [edi], dl
            jne again
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }

    //! Atomic OR, implemented as a cmpxchg retry loop; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, byte ptr [edi]
            align 16
        again:
            mov dl, al
            or dl, cl
            lock cmpxchg byte ptr [edi], dl
            jne again
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }

    //! Atomic XOR, implemented as a cmpxchg retry loop; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, byte ptr [edi]
            align 16
        again:
            mov dl, al
            xor dl, cl
            lock cmpxchg byte ptr [edi], dl
            jne again
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }
};
|
||||
|
||||
#else
|
||||
|
||||
//! 1-byte operations fallback: emulate byte-wide arithmetic on top of the 32-bit
//! operations, used when neither 8-bit intrinsics nor x86 inline asm are available
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)
|
||||
|
||||
//! 2-byte operations, used when the 16-bit Interlocked* intrinsics are available
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
    public core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
    }

    //! Strong CAS; on failure the observed value is written back into \c expected
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        storage_type previous = expected;
        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
        expected = old_val;
        return (previous == old_val);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
    }
};
|
||||
|
||||
#elif defined(_M_IX86)
|
||||
|
||||
//! 2-byte operations for 32-bit x86 when the 16-bit Interlocked* intrinsics are
//! not available, implemented with MSVC inline assembly. See the implementation
//! note near the top of this file regarding register usage in asm blocks.
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
    public core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    //! Atomic addition via lock xadd; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edx, storage
            movzx eax, v
            lock xadd word ptr [edx], ax
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }

    //! Atomic exchange via xchg (which carries an implied lock prefix)
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edx, storage
            movzx eax, v
            xchg word ptr [edx], ax
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }

    //! Strong CAS via lock cmpxchg; the observed value is stored back into \c expected
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) noexcept
    {
        base_type::fence_before(success_order);
        bool success;
        __asm
        {
            mov esi, expected
            mov edi, storage
            movzx eax, word ptr [esi]
            movzx edx, desired
            lock cmpxchg word ptr [edi], dx
            mov word ptr [esi], ax
            sete success
        };
        // The success and failure fences are equivalent anyway
        base_type::fence_after(success_order);
        return success;
    }

    //! Atomic AND, implemented as a cmpxchg retry loop; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, word ptr [edi]
            align 16
        again:
            mov dx, ax
            and dx, cx
            lock cmpxchg word ptr [edi], dx
            jne again
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }

    //! Atomic OR, implemented as a cmpxchg retry loop; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, word ptr [edi]
            align 16
        again:
            mov dx, ax
            or dx, cx
            lock cmpxchg word ptr [edi], dx
            jne again
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }

    //! Atomic XOR, implemented as a cmpxchg retry loop; returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, word ptr [edi]
            align 16
        again:
            mov dx, ax
            xor dx, cx
            lock cmpxchg word ptr [edi], dx
            jne again
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }
};
|
||||
|
||||
#else
|
||||
|
||||
//! 2-byte operations fallback: emulate word-wide arithmetic on top of the 32-bit
//! operations, used when neither 16-bit intrinsics nor x86 inline asm are available
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
|
||||
|
||||
//! 64-bit double-width CAS operations for 32-bit x86, based on the cmpxchg8b
//! instruction. Loads and stores of aligned storage use single 64-bit SSE/x87
//! instructions; misaligned or seq_cst accesses fall back to cmpxchg8b.
template< bool Signed, bool Interprocess >
struct msvc_dcas_x86
{
    using storage_type = typename storage_traits< 8u >::type;

    static constexpr bool is_interprocess = Interprocess;
    static constexpr bool full_cas_based = true;
    static constexpr bool is_always_lock_free = true;

    static constexpr std::size_t storage_size = 8u;
    static constexpr std::size_t storage_alignment = 8u;
    static constexpr bool is_signed = Signed;

    // Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
    //
    // The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically:
    // * Reading or writing a quadword aligned on a 64-bit boundary
    //
    // Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64 bit native types for storage and dynamic memory allocations
    // have at least 8 byte alignment. The only unfortunate case is when atomic is placed on the stack and it is not 8-byte aligned (like on 32 bit Windows).

    //! Atomic store; uses a single 64-bit instruction for aligned non-seq_cst
    //! stores, and a cmpxchg8b loop otherwise
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        storage_type volatile* p = &storage;
        if (BOOST_LIKELY(order != memory_order_seq_cst && ((uintptr_t)p & 7u) == 0u))
        {
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
            __asm
            {
                mov edx, p
                vmovq xmm4, v
                vmovq qword ptr [edx], xmm4
            };
#else
            __asm
            {
                mov edx, p
                movq xmm4, v
                movq qword ptr [edx], xmm4
            };
#endif
#else
            // No SSE2: use the x87 FPU to perform a single 64-bit transfer
            __asm
            {
                mov edx, p
                fild v
                fistp qword ptr [edx]
            };
#endif
        }
        else
        {
            // ebx is saved and restored because it may be used as a frame pointer
            // (see the implementation note near the top of this file)
            std::uint32_t backup;
            __asm
            {
                mov backup, ebx
                mov edi, p
                mov ebx, dword ptr [v]
                mov ecx, dword ptr [v + 4]
                mov eax, dword ptr [edi]
                mov edx, dword ptr [edi + 4]
                align 16
            again:
                lock cmpxchg8b qword ptr [edi]
                jne again
                mov ebx, backup
            };
        }

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    //! Atomic load; uses a single 64-bit instruction for aligned storage,
    //! and a cmpxchg8b otherwise
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        storage_type const volatile* p = &storage;
        storage_type value;

        if (BOOST_LIKELY(((uintptr_t)p & 7u) == 0u))
        {
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
            __asm
            {
                mov edx, p
                vmovq xmm4, qword ptr [edx]
                vmovq value, xmm4
            };
#else
            __asm
            {
                mov edx, p
                movq xmm4, qword ptr [edx]
                movq value, xmm4
            };
#endif
#else
            // No SSE2: use the x87 FPU to perform a single 64-bit transfer
            __asm
            {
                mov edx, p
                fild qword ptr [edx]
                fistp value
            };
#endif
        }
        else
        {
            // We don't care for comparison result here; the previous value will be stored into value anyway.
            // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b.
            __asm
            {
                mov edi, p
                mov eax, ebx
                mov edx, ecx
                lock cmpxchg8b qword ptr [edi]
                mov dword ptr [value], eax
                mov dword ptr [value + 4], edx
            };
        }

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        return value;
    }

    //! Strong CAS; the observed value is stored back into \c expected
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        // MSVC-11 in 32-bit mode sometimes generates messed up code without compiler barriers,
        // even though the _InterlockedCompareExchange64 intrinsic already provides one.
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        storage_type volatile* p = &storage;
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
        const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected);
        const bool result = (old_val == expected);
        expected = old_val;
#else
        // ebx is saved and restored because it may be used as a frame pointer
        // (see the implementation note near the top of this file)
        bool result;
        std::uint32_t backup;
        __asm
        {
            mov backup, ebx
            mov edi, p
            mov esi, expected
            mov ebx, dword ptr [desired]
            mov ecx, dword ptr [desired + 4]
            mov eax, dword ptr [esi]
            mov edx, dword ptr [esi + 4]
            lock cmpxchg8b qword ptr [edi]
            mov dword ptr [esi], eax
            mov dword ptr [esi + 4], edx
            mov ebx, backup
            sete result
        };
#endif
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        return result;
    }

    //! Weak CAS; cmpxchg8b never fails spuriously, so forwards to the strong version
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    //! Atomic exchange, implemented as a cmpxchg8b retry loop; returns the previous value
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        storage_type volatile* p = &storage;
        std::uint32_t backup;
        __asm
        {
            mov backup, ebx
            mov edi, p
            mov ebx, dword ptr [v]
            mov ecx, dword ptr [v + 4]
            mov eax, dword ptr [edi]
            mov edx, dword ptr [edi + 4]
            align 16
        again:
            lock cmpxchg8b qword ptr [edi]
            jne again
            mov ebx, backup
            mov dword ptr [v], eax
            mov dword ptr [v + 4], edx
        };

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        return v;
    }
};
|
||||
|
||||
//! 64-bit operations on 32-bit x86: all operations are built upon the
//! cmpxchg8b-based CAS provided by msvc_dcas_x86
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
    public core_operations_cas_based< msvc_dcas_x86< Signed, Interprocess > >
{
};
|
||||
|
||||
#elif defined(_M_AMD64)
|
||||
|
||||
//! 64-bit operations for x86_64, based on the 64-bit Interlocked* intrinsics
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
    public core_arch_operations_msvc_x86< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >
{
    using base_type = core_arch_operations_msvc_x86< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >;
    using storage_type = typename base_type::storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
    }

    //! Strong CAS; on failure the observed value is written back into \c expected
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        storage_type previous = expected;
        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
        expected = old_val;
        return (previous == old_val);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
    }
};
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
|
||||
|
||||
//! 128-bit double-width CAS operations for x86_64, based on the cmpxchg16b
//! instruction (exposed as the InterlockedCompareExchange128 intrinsic).
//! When AVX is enabled, aligned loads and stores are done with single 16-byte
//! vector instructions instead of a CAS.
template< bool Signed, bool Interprocess >
struct msvc_dcas_x86_64
{
    using storage_type = typename storage_traits< 16u >::type;

    static constexpr bool is_interprocess = Interprocess;
    static constexpr bool full_cas_based = true;
    static constexpr bool is_always_lock_free = true;

    static constexpr std::size_t storage_size = 16u;
    static constexpr std::size_t storage_alignment = 16u;
    static constexpr bool is_signed = Signed;

    //! Atomic store; aligned non-seq_cst stores use a single 16-byte vector store
    //! under AVX, otherwise a CAS loop is used
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
#if defined(__AVX__)
        if (BOOST_LIKELY(order != memory_order_seq_cst && (((uintptr_t)&storage) & 15u) == 0u))
        {
            BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
            __m128i value;
            BOOST_ATOMIC_DETAIL_MEMCPY(&value, &v, sizeof(value));
            _mm_store_si128(const_cast< __m128i* >(reinterpret_cast< volatile __m128i* >(&storage)), value);
            BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
            return;
        }
#endif // defined(__AVX__)

        // CAS loop: retry until the intrinsic reports that the stored comparand matched
        storage_type value = const_cast< storage_type& >(storage);
        while (!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, v, &value)) {}
    }

    //! Atomic load; aligned loads use a single 16-byte vector load under AVX,
    //! otherwise the value is read with a CAS against a zero comparand
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) noexcept
    {
        storage_type value;
#if defined(__AVX__)
        if (BOOST_LIKELY((((uintptr_t)&storage) & 15u) == 0u))
        {
            __m128i v = _mm_load_si128(const_cast< const __m128i* >(reinterpret_cast< const volatile __m128i* >(&storage)));
            BOOST_ATOMIC_DETAIL_MEMCPY(&value, &v, sizeof(value));
            BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
            return value;
        }
#endif // defined(__AVX__)

        // The CAS writes the observed value into \c value regardless of success
        value = storage_type();
        BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, value, &value);
        return value;
    }

    //! Strong CAS; the intrinsic updates \c expected with the observed value on failure
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        return !!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, desired, &expected);
    }

    //! Weak CAS; cmpxchg16b never fails spuriously, so forwards to the strong version
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }
};
|
||||
|
||||
//! 128-bit operations: built upon the cmpxchg16b-based CAS of msvc_dcas_x86_64,
//! with exchange additionally emulated via CAS (cas_based_exchange)
template< bool Signed, bool Interprocess >
struct core_arch_operations< 16u, Signed, Interprocess > :
    public core_operations_cas_based< cas_based_exchange< msvc_dcas_x86_64< Signed, Interprocess > > >
{
};
|
||||
|
||||
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
|
||||
49
include/boost/atomic/detail/core_operations.hpp
Normal file
49
include/boost/atomic/detail/core_operations.hpp
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_operations.hpp
|
||||
*
|
||||
* This header defines core atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/platform.hpp>
|
||||
#include <boost/atomic/detail/core_arch_operations.hpp>
|
||||
#include <boost/atomic/detail/core_operations_fwd.hpp>
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
|
||||
#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/core_ops_)
|
||||
#endif
|
||||
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Default specialization that falls back to architecture-specific implementation.
//! A backend header included above may provide a more specific specialization.
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations :
    public core_arch_operations< Size, Signed, Interprocess >
{
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_
|
||||
194
include/boost/atomic/detail/core_operations_emulated.hpp
Normal file
194
include/boost/atomic/detail/core_operations_emulated.hpp
Normal file
@@ -0,0 +1,194 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_operations_emulated.hpp
|
||||
*
|
||||
* This header contains lock pool-based implementation of the core atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_operations_emulated_fwd.hpp>
|
||||
#include <boost/atomic/detail/lock_pool.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// Selects the storage type for the emulated (lock pool-based) operations.
// The third parameter is true when the user-provided Alignment is at least the
// native alignment of the storage type for this Size, in which case the normal
// (integral, if possible) storage type from storage_traits can be used directly.
template< std::size_t Size, std::size_t Alignment, bool = Alignment >= storage_traits< Size >::native_alignment >
struct core_operations_emulated_base
{
    using storage_type = typename storage_traits< Size >::type;
};

// Fallback for under-aligned storage: use a byte buffer with the requested
// (weaker) alignment instead of the natively-aligned storage type.
template< std::size_t Size, std::size_t Alignment >
struct core_operations_emulated_base< Size, Alignment, false >
{
    using storage_type = buffer_storage< Size, Alignment >;
};
|
||||
|
||||
//! Emulated implementation of core atomic operations
//
// Serializes every operation through a process-local lock pool keyed by the
// storage address (see scoped_lock below). Because the lock pool lives in the
// current process only, inter-process use is rejected at compile time by the
// static_assert in each operation.
template< std::size_t Size, std::size_t Alignment, bool Signed, bool Interprocess >
struct core_operations_emulated :
    public core_operations_emulated_base< Size, Alignment >
{
    using base_type = core_operations_emulated_base< Size, Alignment >;

    // Define storage_type to have alignment not greater than Alignment. This will allow operations to work with value_types
    // that possibly have weaker alignment requirements than storage_traits< Size >::type would. This is important for atomic_ref<>.
    // atomic<> will allow higher alignment requirement than its value_type.
    // Note that storage_type should be an integral type, if possible, so that arithmetic and bitwise operations are possible.
    using storage_type = typename base_type::storage_type;

    static constexpr std::size_t storage_size = Size;
    // Clamp the advertised alignment to what the caller guarantees.
    static constexpr std::size_t storage_alignment = Alignment >= storage_traits< Size >::alignment ? storage_traits< Size >::alignment : Alignment;

    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;
    static constexpr bool full_cas_based = false;

    // This backend is the lock-based fallback, so it is never lock-free.
    static constexpr bool is_always_lock_free = false;

    // Lock selected from the pool by the storage address; held for the duration
    // of each operation below.
    using scoped_lock = lock_pool::scoped_lock< storage_alignment >;

    // Plain store under the lock. const_cast strips volatile; all access to the
    // storage happens while the lock is held.
    static void store(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        scoped_lock lock(&storage);
        const_cast< storage_type& >(storage) = v;
    }

    // Plain load under the lock.
    static storage_type load(storage_type const volatile& storage, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        scoped_lock lock(&storage);
        return const_cast< storage_type const& >(storage);
    }

    //! Returns the previous value; addition is performed under the lock.
    static storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        s += v;
        return old_val;
    }

    //! Returns the previous value; subtraction is performed under the lock.
    static storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        s -= v;
        return old_val;
    }

    //! Replaces the stored value and returns the previous one.
    static storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        s = v;
        return old_val;
    }

    //! On failure, \a expected is updated with the observed value. Under the
    //! lock the comparison cannot fail spuriously, so this is a true strong CAS.
    static bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        const bool res = old_val == expected;
        if (res)
            s = desired;
        expected = old_val;

        return res;
    }

    static bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        // Note: This function is the exact copy of compare_exchange_strong. The reason we're not just forwarding the call
        // is that MSVC-12 ICEs in this case.
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        const bool res = old_val == expected;
        if (res)
            s = desired;
        expected = old_val;

        return res;
    }

    //! Returns the previous value; bitwise AND is performed under the lock.
    static storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        s &= v;
        return old_val;
    }

    //! Returns the previous value; bitwise OR is performed under the lock.
    static storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        s |= v;
        return old_val;
    }

    //! Returns the previous value; bitwise XOR is performed under the lock.
    static storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        s ^= v;
        return old_val;
    }

    //! Sets the value to 1; returns true if the previous value was nonzero.
    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        return !!exchange(storage, (storage_type)1, order);
    }

    //! Resets the value to 0.
    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
    {
        static_assert(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        store(storage, (storage_type)0, order);
    }
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_
|
||||
38
include/boost/atomic/detail/core_operations_emulated_fwd.hpp
Normal file
38
include/boost/atomic/detail/core_operations_emulated_fwd.hpp
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_operations_emulated_fwd.hpp
|
||||
*
|
||||
* This header forward-declares lock pool-based implementation of the core atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// Forward declaration of the lock pool-based fallback implementation, defined
// in core_operations_emulated.hpp.
template< std::size_t Size, std::size_t Alignment, bool Signed, bool Interprocess >
struct core_operations_emulated;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_
|
||||
38
include/boost/atomic/detail/core_operations_fwd.hpp
Normal file
38
include/boost/atomic/detail/core_operations_fwd.hpp
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_operations_fwd.hpp
|
||||
*
|
||||
* This header contains forward declaration of the \c core_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// Forward declaration of the core operations customization point; the primary
// template and backend specializations are provided by other detail headers.
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
94
include/boost/atomic/detail/core_ops_cas_based.hpp
Normal file
94
include/boost/atomic/detail/core_ops_cas_based.hpp
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_ops_cas_based.hpp
|
||||
*
|
||||
* This header contains CAS-based implementation of core atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
template< typename Base >
|
||||
struct core_operations_cas_based :
|
||||
public Base
|
||||
{
|
||||
using storage_type = typename Base::storage_type;
|
||||
|
||||
static constexpr bool full_cas_based = true;
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_val;
|
||||
atomics::detail::non_atomic_load(storage, old_val);
|
||||
while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {}
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_val;
|
||||
atomics::detail::non_atomic_load(storage, old_val);
|
||||
while (!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {}
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_val;
|
||||
atomics::detail::non_atomic_load(storage, old_val);
|
||||
while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {}
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_val;
|
||||
atomics::detail::non_atomic_load(storage, old_val);
|
||||
while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {}
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_val;
|
||||
atomics::detail::non_atomic_load(storage, old_val);
|
||||
while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {}
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
return !!Base::exchange(storage, (storage_type)1, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
Base::store(storage, (storage_type)0, order);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_
|
||||
309
include/boost/atomic/detail/core_ops_gcc_atomic.hpp
Normal file
309
include/boost/atomic/detail/core_ops_gcc_atomic.hpp
Normal file
@@ -0,0 +1,309 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_ops_gcc_atomic.hpp
|
||||
*
|
||||
* This header contains implementation of the \c core_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/core_arch_operations.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
#include <boost/atomic/detail/gcc_atomic_memory_order_utils.hpp>
|
||||
|
||||
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE || \
|
||||
BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE || \
|
||||
BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE || \
|
||||
BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
|
||||
// There are platforms where we need to use larger storage types
|
||||
#include <boost/atomic/detail/int_sizes.hpp>
|
||||
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
|
||||
#endif
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__INTEL_COMPILER)
|
||||
// This is used to suppress warning #32013 described in gcc_atomic_memory_order_utils.hpp
|
||||
// for Intel Compiler.
|
||||
// In debug builds the compiler does not inline any functions, so basically
|
||||
// every atomic function call results in this warning. I don't know any other
|
||||
// way to selectively disable just this one warning.
|
||||
#pragma system_header
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Implementation of core operations on top of the GCC/Clang __atomic intrinsics.
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations_gcc_atomic
{
    using storage_type = typename storage_traits< Size >::type;

    static constexpr std::size_t storage_size = Size;
    static constexpr std::size_t storage_alignment = storage_traits< Size >::alignment;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;
    static constexpr bool full_cas_based = false;

    // Note: In the current implementation, core_operations_gcc_atomic are used only when the particularly sized __atomic
    //       intrinsics are always lock-free (i.e. the corresponding LOCK_FREE macro is 2). Therefore it is safe to
    //       always set is_always_lock_free to true here.
    static constexpr bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
#if defined(BOOST_GCC) && BOOST_GCC < 100100 && (defined(__x86_64__) || defined(__i386__))
        // gcc up to 10.1 generates mov + mfence for seq_cst stores, which is slower than xchg
        if (order != memory_order_seq_cst)
            __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
        else
            __atomic_exchange_n(&storage, v, __ATOMIC_SEQ_CST);
#else
        __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
#endif
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC) && !((defined(BOOST_GCC) && BOOST_GCC >= 130100) || (defined(BOOST_CLANG) && BOOST_CLANG_VERSION >= 160000))
        // At least gcc 9.3 and clang 10 do not generate relaxed ldapr instructions that are available in ARMv8.3-RCPC extension.
        // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95751
        // This was fixed in gcc 13.1 and clang 16.
        using core_arch_operations = atomics::detail::core_arch_operations< storage_size, is_signed, is_interprocess >;
        return core_arch_operations::load(storage, order);
#else
        return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
#endif
    }

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    // Strong CAS: the fourth intrinsic argument (weak) is false.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, false,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    // Weak CAS: the fourth intrinsic argument (weak) is true; may fail spuriously.
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, true,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
    {
        return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
    {
        // const_cast strips volatile; __atomic_clear expects a non-volatile pointer on some compilers.
        __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
    }
};
|
||||
|
||||
// We want to only enable __atomic* intrinsics when the corresponding BOOST_ATOMIC_DETAIL_GCC_ATOMIC_*_LOCK_FREE macro indicates
// the same or better lock-free guarantees as the BOOST_ATOMIC_*_LOCK_FREE macro. Otherwise, we want to leave core_operations
// unspecialized, so that core_arch_operations is used instead.
//
// For each size, the first matching branch uses the natively-sized intrinsics; the
// #elif branches fall back to a larger lock-free size via extending_cas_based_arithmetic.

#if BOOST_ATOMIC_INT128_LOCK_FREE > 0 && BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT128_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 16u, Signed, Interprocess > :
    public core_operations_gcc_atomic< 16u, Signed, Interprocess >
{
};

#endif

#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 8u, Signed, Interprocess > :
    public core_operations_gcc_atomic< 8u, Signed, Interprocess >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 8u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 8u, Signed >
{
};

#endif
#endif // BOOST_ATOMIC_INT64_LOCK_FREE > 0


#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
    public core_operations_gcc_atomic< 4u, Signed, Interprocess >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 4u, Signed >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE

// Fixed: this branch previously specialized core_operations< 8u, ... >, which left
// the 4-byte size unspecialized on this configuration and could collide with the
// 8-byte specialization defined above. The 32-bit section must specialize 4u.
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 4u, Signed >
{
};

#endif
#endif // BOOST_ATOMIC_INT32_LOCK_FREE > 0


#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
    public core_operations_gcc_atomic< 2u, Signed, Interprocess >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 4u, Signed, Interprocess >, 2u, Signed >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 2u, Signed >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 2u, Signed >
{
};

#endif
#endif // BOOST_ATOMIC_INT16_LOCK_FREE > 0


#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
    public core_operations_gcc_atomic< 1u, Signed, Interprocess >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 2u, Signed, Interprocess >, 1u, Signed >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 4u, Signed, Interprocess >, 1u, Signed >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 1u, Signed >
{
};

#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE

template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 1u, Signed >
{
};

#endif
#endif // BOOST_ATOMIC_INT8_LOCK_FREE > 0
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
265
include/boost/atomic/detail/core_ops_gcc_sync.hpp
Normal file
265
include/boost/atomic/detail/core_ops_gcc_sync.hpp
Normal file
@@ -0,0 +1,265 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2011 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_ops_gcc_sync.hpp
|
||||
*
|
||||
* This header contains implementation of the \c core_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <type_traits>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// Shared fence helpers for the legacy __sync-intrinsics backend. The __sync
// builtins provide only full barriers, so the requested memory_order is mapped
// to conditional __sync_synchronize() calls around the access.
struct core_operations_gcc_sync_base
{
    static constexpr bool full_cas_based = false;
    static constexpr bool is_always_lock_free = true;

    // Full barrier before a store when the order contains the release bit
    // (release, acq_rel, seq_cst in Boost's memory_order encoding).
    static BOOST_FORCEINLINE void fence_before_store(memory_order order) noexcept
    {
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
    }

    // Trailing full barrier only for seq_cst stores.
    static BOOST_FORCEINLINE void fence_after_store(memory_order order) noexcept
    {
        if (order == memory_order_seq_cst)
            __sync_synchronize();
    }

    // Full barrier after a load when the order contains the acquire or consume
    // bits (acquire, consume, acq_rel, seq_cst).
    static BOOST_FORCEINLINE void fence_after_load(memory_order order) noexcept
    {
        if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_consume))) != 0u)
            __sync_synchronize();
    }
};
|
||||
|
||||
//! Core atomic operations implemented with the legacy __sync* GCC intrinsics.
//! \tparam Size Storage size, in bytes
//! \tparam Signed Indicates whether the stored value is treated as signed
//! \tparam Interprocess Indicates whether the atomic is used for inter-process communication
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations_gcc_sync :
    public core_operations_gcc_sync_base
{
    using storage_type = typename storage_traits< Size >::type;

    static constexpr std::size_t storage_size = Size;
    static constexpr std::size_t storage_alignment = storage_traits< storage_size >::alignment;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;

private:
    // In general, we cannot guarantee atomicity of plain loads and stores of anything larger than a single byte on
    // an arbitrary CPU architecture. However, all modern architectures seem to guarantee atomic loads and stores of
    // suitably aligned objects of up to a pointer size. For larger objects we should probably use intrinsics to guarantee
    // atomicity. If there appears an architecture where this doesn't hold, this threshold needs to be updated (patches are welcome).
    using plain_stores_loads_are_atomic = std::integral_constant< bool, storage_size <= sizeof(void*) >;

    //! Store implementation for storage sizes where a plain store is atomic (tag dispatch: true_type)
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order, std::true_type) noexcept
    {
        fence_before_store(order);
        storage = v;
        fence_after_store(order);
    }

    //! Store implementation for storage sizes where a plain store is not atomic; falls back to exchange
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order, std::false_type) noexcept
    {
        exchange(storage, v, order);
    }

    //! Load implementation for storage sizes where a plain load is atomic (tag dispatch: true_type)
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order, std::true_type) noexcept
    {
        storage_type v = storage;
        fence_after_load(order);
        return v;
    }

    //! Load implementation for storage sizes where a plain load is not atomic; emulates the load with a CAS
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order, std::false_type) noexcept
    {
        // Note: don't use fetch_add or other arithmetics here since storage_type may not be an arithmetic type.
        storage_type expected = storage_type();
        storage_type desired = expected;
        // We don't care if CAS succeeds or not. If it does, it will just write the same value there was before.
        return __sync_val_compare_and_swap(const_cast< storage_type volatile* >(&storage), expected, desired);
    }

public:
    //! Atomically stores \a v into \a storage
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        store(storage, v, order, plain_stores_loads_are_atomic());
    }

    //! Atomically loads the value from \a storage
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        return load(storage, order, plain_stores_loads_are_atomic());
    }

    //! Atomically adds \a v to \a storage and returns the previous value.
    //! The memory_order argument is ignored since __sync_fetch_and_add implies a full barrier.
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return __sync_fetch_and_add(&storage, v);
    }

    //! Atomically subtracts \a v from \a storage and returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return __sync_fetch_and_sub(&storage, v);
    }

    //! Atomically replaces the value in \a storage with \a v and returns the previous value
    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        // GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
        // std::atomic<> uses this intrinsic unconditionally. We do so as well. In case if some architectures actually don't support this, we can always
        // add a check here and fall back to a CAS loop.
        // __sync_lock_test_and_set is only an acquire barrier, so emit a release fence explicitly if needed.
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
        return __sync_lock_test_and_set(&storage, v);
    }

    //! Atomic compare-and-exchange; on failure updates \a expected with the value found in \a storage.
    //! The memory_order arguments are ignored since the intrinsic implies a full barrier.
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        storage_type expected2 = expected;
        storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);

        if (old_val == expected2)
        {
            return true;
        }
        else
        {
            expected = old_val;
            return false;
        }
    }

    //! Weak CAS; __sync CAS never fails spuriously, so this simply forwards to the strong version
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    //! Atomically applies bitwise AND with \a v and returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return __sync_fetch_and_and(&storage, v);
    }

    //! Atomically applies bitwise OR with \a v and returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return __sync_fetch_and_or(&storage, v);
    }

    //! Atomically applies bitwise XOR with \a v and returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        return __sync_fetch_and_xor(&storage, v);
    }

    //! Atomically sets the value to 1 and returns \c true if the previous value was nonzero
    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
    {
        // __sync_lock_test_and_set is only an acquire barrier, so emit a release fence explicitly if needed
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            __sync_synchronize();
        return !!__sync_lock_test_and_set(&storage, 1);
    }

    //! Atomically clears the value (sets to 0). __sync_lock_release is a release barrier;
    //! a trailing full fence is added for seq_cst.
    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
    {
        __sync_lock_release(&storage);
        if (order == memory_order_seq_cst)
            __sync_synchronize();
    }
};
|
||||
|
||||
#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
|
||||
template< bool Signed, bool Interprocess >
|
||||
struct core_operations< 1u, Signed, Interprocess > :
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
|
||||
public core_operations_gcc_sync< 1u, Signed, Interprocess >
|
||||
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 2u, Signed, Interprocess >, 1u, Signed >
|
||||
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 1u, Signed >
|
||||
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 1u, Signed >
|
||||
#else
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 1u, Signed >
|
||||
#endif
|
||||
{
|
||||
};
|
||||
#endif
|
||||
|
||||
#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
|
||||
template< bool Signed, bool Interprocess >
|
||||
struct core_operations< 2u, Signed, Interprocess > :
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
|
||||
public core_operations_gcc_sync< 2u, Signed, Interprocess >
|
||||
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 2u, Signed >
|
||||
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 2u, Signed >
|
||||
#else
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 2u, Signed >
|
||||
#endif
|
||||
{
|
||||
};
|
||||
#endif
|
||||
|
||||
#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
|
||||
template< bool Signed, bool Interprocess >
|
||||
struct core_operations< 4u, Signed, Interprocess > :
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
|
||||
public core_operations_gcc_sync< 4u, Signed, Interprocess >
|
||||
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 4u, Signed >
|
||||
#else
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 4u, Signed >
|
||||
#endif
|
||||
{
|
||||
};
|
||||
#endif
|
||||
|
||||
#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
|
||||
template< bool Signed, bool Interprocess >
|
||||
struct core_operations< 8u, Signed, Interprocess > :
|
||||
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
|
||||
public core_operations_gcc_sync< 8u, Signed, Interprocess >
|
||||
#else
|
||||
public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 8u, Signed >
|
||||
#endif
|
||||
{
|
||||
};
|
||||
#endif
|
||||
|
||||
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
|
||||
template< bool Signed, bool Interprocess >
|
||||
struct core_operations< 16u, Signed, Interprocess > :
|
||||
public core_operations_gcc_sync< 16u, Signed, Interprocess >
|
||||
{
|
||||
};
|
||||
#endif
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
|
||||
169
include/boost/atomic/detail/core_ops_linux_arm.hpp
Normal file
169
include/boost/atomic/detail/core_ops_linux_arm.hpp
Normal file
@@ -0,0 +1,169 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009, 2011 Helge Bahmann
|
||||
* Copyright (c) 2009 Phil Endecott
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Linux-specific code by Phil Endecott
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_ops_linux_arm.hpp
|
||||
*
|
||||
* This header contains implementation of the \c core_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/core_ops_cas_based.hpp>
|
||||
#include <boost/atomic/detail/cas_based_exchange.hpp>
|
||||
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
|
||||
#include <boost/atomic/detail/fence_operations.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// Different ARM processors have different atomic instructions. In particular,
|
||||
// architecture versions before v6 (which are still in widespread use, e.g. the
|
||||
// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
|
||||
// On Linux the kernel provides some support that lets us abstract away from
|
||||
// these differences: it provides emulated CAS and barrier functions at special
|
||||
// addresses that are guaranteed not to be interrupted by the kernel. Using
|
||||
// this facility is slightly slower than inline assembler would be, but much
|
||||
// faster than a system call.
|
||||
//
|
||||
// https://lwn.net/Articles/314561/
|
||||
//
|
||||
// While this emulated CAS is "strong" in the sense that it does not fail
|
||||
// "spuriously" (i.e.: it never fails to perform the exchange when the value
|
||||
// found equals the value expected), it does not return the found value on
|
||||
// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
|
||||
// return the found value on failure, and we have to manually load this value
|
||||
// after the emulated CAS reports failure. This in turn introduces a race
|
||||
// between the CAS failing (due to the "wrong" value being found) and subsequently
|
||||
// loading (which might turn up the "right" value). From an application's
|
||||
// point of view this looks like "spurious failure", and therefore the
|
||||
// emulated CAS is only good enough to provide compare_exchange_weak
|
||||
// semantics.
|
||||
|
||||
//! Base class for the Linux/ARM kernel-helper CAS backend; provides the fences
//! required around plain loads and stores
struct linux_arm_cas_base
{
    // All operations are built on top of the kernel-provided CAS
    static constexpr bool full_cas_based = true;
    static constexpr bool is_always_lock_free = true;

    //! Emits a hardware fence before a plain store if the order has release semantics
    static BOOST_FORCEINLINE void fence_before_store(memory_order order) noexcept
    {
        if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
            fence_operations::hardware_full_fence();
    }

    //! Emits a hardware fence after a plain store if sequential consistency is requested
    static BOOST_FORCEINLINE void fence_after_store(memory_order order) noexcept
    {
        if (order == memory_order_seq_cst)
            fence_operations::hardware_full_fence();
    }

    //! Emits a hardware fence after a plain load if the order has acquire or consume semantics
    static BOOST_FORCEINLINE void fence_after_load(memory_order order) noexcept
    {
        if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
            fence_operations::hardware_full_fence();
    }
};
|
||||
|
||||
//! 32-bit CAS implementation based on the CAS helper the Linux kernel exposes
//! to user space on ARM (see the comment block above for background).
//! \tparam Signed Indicates whether the stored value is treated as signed
//! \tparam Interprocess Indicates whether the atomic is used for inter-process communication
template< bool Signed, bool Interprocess >
struct linux_arm_cas :
    public linux_arm_cas_base
{
    using storage_type = typename storage_traits< 4u >::type;

    static constexpr std::size_t storage_size = 4u;
    static constexpr std::size_t storage_alignment = 4u;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;

    //! Atomically stores \a v into \a storage (plain 32-bit store bracketed with fences)
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        fence_before_store(order);
        storage = v;
        fence_after_store(order);
    }

    //! Atomically loads the value from \a storage (plain 32-bit load followed by a fence)
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        storage_type v = storage;
        fence_after_load(order);
        return v;
    }

    //! Strong CAS built by retrying the weak CAS until it either succeeds or
    //! fails with a genuinely different value (filtering out spurious failures).
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        while (true)
        {
            storage_type tmp = expected;
            if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order))
                return true;
            // Only report failure if the observed value actually differs; otherwise the
            // weak CAS failed spuriously and we retry
            if (tmp != expected)
            {
                expected = tmp;
                return false;
            }
        }
    }

    //! Weak CAS invoking the kernel-provided cmpxchg helper. The helper does not return
    //! the found value, so on failure we re-load \a storage, which may race and thus can
    //! report a "spurious" failure (hence only weak semantics). Memory orders are ignored:
    //! the kernel helper acts as a full barrier.
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) noexcept
    {
        using kernel_cmpxchg32_t = storage_type (storage_type oldval, storage_type newval, volatile storage_type* ptr);

        // 0xffff0fc0 is the fixed address of the __kuser_cmpxchg helper in the kernel-mapped
        // user helper page; it returns 0 on success
        if (((kernel_cmpxchg32_t*)0xffff0fc0)(expected, desired, &storage) == 0)
        {
            return true;
        }
        else
        {
            expected = storage;
            return false;
        }
    }
};
|
||||
|
||||
//! 1-byte core operations, emulated on the 4-byte kernel-helper CAS with sign/zero extension
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >, 1u, Signed >
{
};
|
||||
|
||||
//! 2-byte core operations, emulated on the 4-byte kernel-helper CAS with sign/zero extension
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >, 2u, Signed >
{
};
|
||||
|
||||
//! 4-byte core operations built directly on the kernel-helper CAS
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
    public core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >
{
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_
|
||||
201
include/boost/atomic/detail/core_ops_windows.hpp
Normal file
201
include/boost/atomic/detail/core_ops_windows.hpp
Normal file
@@ -0,0 +1,201 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2012 Tim Blechmann
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/core_ops_windows.hpp
|
||||
*
|
||||
* This header contains implementation of the \c core_operations template.
|
||||
*
|
||||
* This implementation is the most basic version for Windows. It should
|
||||
* work for any non-MSVC-like compilers as long as there are Interlocked WinAPI
|
||||
* functions available. This version is also used for WinCE.
|
||||
*
|
||||
* Notably, this implementation is not as efficient as other
|
||||
* versions based on compiler intrinsics.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/interlocked.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/core_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/type_traits/make_signed.hpp>
|
||||
#include <boost/atomic/detail/ops_msvc_common.hpp>
|
||||
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Base class of core atomic operations based on Interlocked WinAPI functions.
//! The Interlocked functions provide the hardware ordering; only compiler
//! barriers are needed around them to prevent compiler reordering.
struct core_operations_windows_base
{
    static constexpr bool full_cas_based = false;
    static constexpr bool is_always_lock_free = true;

    //! Compiler barrier before an interlocked operation (memory order is handled by the intrinsic itself)
    static BOOST_FORCEINLINE void fence_before(memory_order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    //! Compiler barrier after an interlocked operation
    static BOOST_FORCEINLINE void fence_after(memory_order) noexcept
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }
};
|
||||
|
||||
//! CRTP base implementing the operations that can be generically expressed through the
//! primitives (exchange, fetch_add, CAS) supplied by \a Derived.
//! \tparam Size Storage size, in bytes
//! \tparam Signed Indicates whether the stored value is treated as signed
//! \tparam Interprocess Indicates whether the atomic is used for inter-process communication
//! \tparam Derived The final operations class providing exchange/fetch_add/compare_exchange_strong
template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
struct core_operations_windows :
    public core_operations_windows_base
{
    using storage_type = typename storage_traits< Size >::type;

    static constexpr std::size_t storage_size = Size;
    static constexpr std::size_t storage_alignment = storage_traits< Size >::alignment;
    static constexpr bool is_signed = Signed;
    static constexpr bool is_interprocess = Interprocess;

    //! Store implemented as an exchange, discarding the previous value
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        Derived::exchange(storage, v, order);
    }

    //! Load implemented as fetch_add(0), which returns the current value without modifying it
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) noexcept
    {
        return Derived::fetch_add(const_cast< storage_type volatile& >(storage), (storage_type)0, order);
    }

    //! Subtraction implemented as addition of the two's-complement negation of \a v.
    //! The cast through the signed type avoids unsigned negation pitfalls.
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        using signed_storage_type = typename boost::atomics::detail::make_signed< storage_type >::type;
        return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
    }

    //! Weak CAS; the interlocked CAS never fails spuriously, so forwards to the strong version
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
    {
        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    //! Atomically sets the value to 1 and returns \c true if the previous value was nonzero
    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) noexcept
    {
        return !!Derived::exchange(storage, (storage_type)1, order);
    }

    //! Atomically clears the value (sets to 0)
    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) noexcept
    {
        store(storage, (storage_type)0, order);
    }
};
|
||||
|
||||
template< bool Signed, bool Interprocess >
|
||||
struct core_operations< 4u, Signed, bool Interprocess > :
|
||||
public core_operations_windows< 4u, Signed, Interprocess, core_operations< 4u, Signed, Interprocess > >
|
||||
{
|
||||
using base_type = core_operations_windows< 4u, Signed, Interprocess, core_operations< 4u, Signed, Interprocess > >;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
base_type::fence_before(order);
|
||||
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
|
||||
base_type::fence_after(order);
|
||||
return v;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
base_type::fence_before(order);
|
||||
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
|
||||
base_type::fence_after(order);
|
||||
return v;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool compare_exchange_strong(
|
||||
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) noexcept
|
||||
{
|
||||
storage_type previous = expected;
|
||||
base_type::fence_before(success_order);
|
||||
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
|
||||
expected = old_val;
|
||||
// The success and failure fences are the same anyway
|
||||
base_type::fence_after(success_order);
|
||||
return (previous == old_val);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
|
||||
base_type::fence_before(order);
|
||||
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
|
||||
base_type::fence_after(order);
|
||||
return v;
|
||||
#else
|
||||
storage_type res = storage;
|
||||
while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
|
||||
return res;
|
||||
#endif
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
|
||||
base_type::fence_before(order);
|
||||
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
|
||||
base_type::fence_after(order);
|
||||
return v;
|
||||
#else
|
||||
storage_type res = storage;
|
||||
while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
|
||||
return res;
|
||||
#endif
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
|
||||
base_type::fence_before(order);
|
||||
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
|
||||
base_type::fence_after(order);
|
||||
return v;
|
||||
#else
|
||||
storage_type res = storage;
|
||||
while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
|
||||
return res;
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
//! 1-byte core operations, emulated on the 4-byte interlocked operations with sign/zero extension
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
|
||||
|
||||
//! 2-byte core operations, emulated on the 4-byte interlocked operations with sign/zero extension
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
    public extending_cas_based_arithmetic< core_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_
|
||||
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extending_cas_based_arithmetic.hpp
|
||||
*
|
||||
* This header contains a boilerplate of core atomic operations that require sign/zero extension in arithmetic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/integral_conversions.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Emulates arithmetic operations on a smaller value stored in a larger storage type.
//! The arithmetic is performed on the full storage, then the result is truncated to
//! \a Size bytes and sign/zero extended (per \a Signed) back to the storage type via a CAS loop.
//! \tparam Base Underlying core operations providing compare_exchange_weak
//! \tparam Size The emulated value size, in bytes (smaller than the storage size)
//! \tparam Signed Whether the emulated value is sign-extended (true) or zero-extended (false)
template< typename Base, std::size_t Size, bool Signed >
struct extending_cas_based_arithmetic :
    public Base
{
    using storage_type = typename Base::storage_type;
    using emulated_storage_type = typename storage_traits< Size >::type;

    //! Atomically adds \a v and returns the previous (extended) value
    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type old_val;
        // Non-atomic initial read is fine: the CAS below corrects old_val until it matches
        atomics::detail::non_atomic_load(storage, old_val);
        storage_type new_val;
        do
        {
            // Truncate the sum to the emulated size, then extend back to the storage type
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val + v));
        }
        while (!Base::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return old_val;
    }

    //! Atomically subtracts \a v and returns the previous (extended) value
    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type old_val;
        atomics::detail::non_atomic_load(storage, old_val);
        storage_type new_val;
        do
        {
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val - v));
        }
        while (!Base::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return old_val;
    }
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_
|
||||
28
include/boost/atomic/detail/extra_fp_operations.hpp
Normal file
28
include/boost/atomic/detail/extra_fp_operations.hpp
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_fp_operations.hpp
|
||||
*
|
||||
* This header defines extra floating point atomic operations, including the generic version.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/extra_fp_ops_generic.hpp>
|
||||
#include <boost/atomic/detail/extra_fp_ops_emulated.hpp>
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_GENERIC)
|
||||
#include BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_HEADER(boost/atomic/detail/extra_fp_ops_)
|
||||
#endif
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_
|
||||
38
include/boost/atomic/detail/extra_fp_operations_fwd.hpp
Normal file
38
include/boost/atomic/detail/extra_fp_operations_fwd.hpp
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_fp_operations_fwd.hpp
|
||||
*
|
||||
* This header contains forward declaration of the \c extra_fp_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Forward declaration of the extra floating point operations template; the trailing
//! bool parameter selects between lock-free and emulated (lock-based) implementations
template< typename Base, typename Value = typename Base::value_type, std::size_t Size = sizeof(typename Base::storage_type), bool = Base::is_always_lock_free >
struct extra_fp_operations;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
117
include/boost/atomic/detail/extra_fp_ops_emulated.hpp
Normal file
117
include/boost/atomic/detail/extra_fp_ops_emulated.hpp
Normal file
@@ -0,0 +1,117 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_fp_ops_emulated.hpp
|
||||
*
|
||||
* This header contains emulated (lock-based) implementation of the extra floating point atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
|
||||
#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Emulated implementation of extra floating point operations
|
||||
template< typename Base, typename Value, std::size_t Size >
|
||||
struct extra_fp_operations_emulated :
|
||||
public Base
|
||||
{
|
||||
using base_type = Base;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
using value_type = Value;
|
||||
using scoped_lock = typename base_type::scoped_lock;
|
||||
|
||||
static value_type fetch_negate(storage_type volatile& storage, memory_order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
storage_type& s = const_cast< storage_type& >(storage);
|
||||
scoped_lock lock(&storage);
|
||||
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
|
||||
value_type new_val = -old_val;
|
||||
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static value_type negate(storage_type volatile& storage, memory_order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
storage_type& s = const_cast< storage_type& >(storage);
|
||||
scoped_lock lock(&storage);
|
||||
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
|
||||
value_type new_val = -old_val;
|
||||
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
return new_val;
|
||||
}
|
||||
|
||||
static value_type add(storage_type volatile& storage, value_type v, memory_order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
storage_type& s = const_cast< storage_type& >(storage);
|
||||
scoped_lock lock(&storage);
|
||||
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
|
||||
value_type new_val = old_val + v;
|
||||
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
return new_val;
|
||||
}
|
||||
|
||||
static value_type sub(storage_type volatile& storage, value_type v, memory_order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
storage_type& s = const_cast< storage_type& >(storage);
|
||||
scoped_lock lock(&storage);
|
||||
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
|
||||
value_type new_val = old_val - v;
|
||||
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
return new_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
fetch_negate(storage, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, value_type v, memory_order order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
base_type::fetch_add(storage, v, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, value_type v, memory_order order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
base_type::fetch_sub(storage, v, order);
|
||||
}
|
||||
};
|
||||
|
||||
// Select the emulated (lock-based) implementation when the base operations are not lock-free
// (the last template parameter of extra_fp_operations is Base::is_always_lock_free)
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_operations< Base, Value, Size, false > :
    public extra_fp_operations_emulated< Base, Value, Size >
{
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
|
||||
192
include/boost/atomic/detail/extra_fp_ops_generic.hpp
Normal file
192
include/boost/atomic/detail/extra_fp_ops_generic.hpp
Normal file
@@ -0,0 +1,192 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_fp_ops_generic.hpp
|
||||
*
|
||||
* This header contains generic implementation of the extra floating point atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/type_traits/is_iec559.hpp>
|
||||
#include <boost/atomic/detail/type_traits/is_integral.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(BOOST_GCC) && BOOST_GCC >= 60000
|
||||
#pragma GCC diagnostic push
|
||||
// ignoring attributes on template argument X - this warning is because we need to pass storage_type as a template argument; no problem in this case
|
||||
#pragma GCC diagnostic ignored "-Wignored-attributes"
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Negate implementation
|
||||
template<
|
||||
typename Base,
|
||||
typename Value,
|
||||
std::size_t Size
|
||||
#if defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)
|
||||
, bool = atomics::detail::is_iec559< Value >::value && atomics::detail::is_integral< typename Base::storage_type >::value
|
||||
#endif
|
||||
>
|
||||
struct extra_fp_negate_generic :
|
||||
public Base
|
||||
{
|
||||
using base_type = Base;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
using value_type = Value;
|
||||
|
||||
static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_storage, new_storage;
|
||||
value_type old_val, new_val;
|
||||
atomics::detail::non_atomic_load(storage, old_storage);
|
||||
do
|
||||
{
|
||||
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
|
||||
new_val = -old_val;
|
||||
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
}
|
||||
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE value_type negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_storage, new_storage;
|
||||
value_type old_val, new_val;
|
||||
atomics::detail::non_atomic_load(storage, old_storage);
|
||||
do
|
||||
{
|
||||
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
|
||||
new_val = -old_val;
|
||||
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
}
|
||||
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
|
||||
return new_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
fetch_negate(storage, order);
|
||||
}
|
||||
};
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)

//! Negate implementation for IEEE 754 / IEC 559 floating point types. We leverage the fact that the sign bit is the most significant bit in the value,
//! so negation is implemented as an atomic XOR of the sign bit on the integral storage.
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_negate_generic< Base, Value, Size, true > :
    public Base
{
    using base_type = Base;
    using storage_type = typename base_type::storage_type;
    using value_type = Value;

    //! The mask with only one sign bit set to 1
    static constexpr storage_type sign_mask = static_cast< storage_type >(1u) << (atomics::detail::value_size_of< value_type >::value * 8u - 1u);

    //! Atomically negates the stored value and returns the previous value
    static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        const storage_type old_storage = base_type::fetch_xor(storage, sign_mask, order);
        return atomics::detail::bitwise_fp_cast< value_type >(old_storage);
    }

    //! Atomically negates the stored value and returns the new value
    static BOOST_FORCEINLINE value_type negate(storage_type volatile& storage, memory_order order) noexcept
    {
        const storage_type new_storage = base_type::bitwise_xor(storage, sign_mask, order);
        return atomics::detail::bitwise_fp_cast< value_type >(new_storage);
    }

    //! Atomically negates the stored value, the result is discarded
    static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        base_type::opaque_xor(storage, sign_mask, order);
    }
};

#endif // defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)
|
||||
|
||||
//! Generic implementation of floating point operations
|
||||
template< typename Base, typename Value, std::size_t Size >
|
||||
struct extra_fp_operations_generic :
|
||||
public extra_fp_negate_generic< Base, Value, Size >
|
||||
{
|
||||
using base_type = extra_fp_negate_generic< Base, Value, Size >;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
using value_type = Value;
|
||||
|
||||
static BOOST_FORCEINLINE value_type add(storage_type volatile& storage, value_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_storage, new_storage;
|
||||
value_type old_val, new_val;
|
||||
atomics::detail::non_atomic_load(storage, old_storage);
|
||||
do
|
||||
{
|
||||
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
|
||||
new_val = old_val + v;
|
||||
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
}
|
||||
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
|
||||
return new_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE value_type sub(storage_type volatile& storage, value_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_storage, new_storage;
|
||||
value_type old_val, new_val;
|
||||
atomics::detail::non_atomic_load(storage, old_storage);
|
||||
do
|
||||
{
|
||||
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
|
||||
new_val = old_val - v;
|
||||
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
}
|
||||
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
|
||||
return new_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, value_type v, memory_order order) noexcept
|
||||
{
|
||||
base_type::fetch_add(storage, v, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, value_type v, memory_order order) noexcept
|
||||
{
|
||||
base_type::fetch_sub(storage, v, order);
|
||||
}
|
||||
};
|
||||
|
||||
// Default extra_fp_operations template definition will be used unless specialized for a specific platform.
// The last template parameter is true when the base operations are lock-free.
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_operations< Base, Value, Size, true > :
    public extra_fp_operations_generic< Base, Value, Size >
{
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#if defined(BOOST_GCC) && BOOST_GCC >= 60000
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
|
||||
28
include/boost/atomic/detail/extra_operations.hpp
Normal file
28
include/boost/atomic/detail/extra_operations.hpp
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2017 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_operations.hpp
|
||||
*
|
||||
* This header defines extra atomic operations, including the generic version.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/extra_ops_generic.hpp>
|
||||
#include <boost/atomic/detail/extra_ops_emulated.hpp>
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_GENERIC)
|
||||
#include BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER(boost/atomic/detail/extra_ops_)
|
||||
#endif
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
|
||||
38
include/boost/atomic/detail/extra_operations_fwd.hpp
Normal file
38
include/boost/atomic/detail/extra_operations_fwd.hpp
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2017 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_operations_fwd.hpp
|
||||
*
|
||||
* This header contains forward declaration of the \c extra_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Extra atomic operations selector. Platform-specific backends and the generic/emulated
//! headers provide specializations; the last (defaulted) parameter selects between the
//! lock-free (true) and emulated (false) implementations.
template< typename Base, std::size_t Size = sizeof(typename Base::storage_type), bool Signed = Base::is_signed, bool = Base::is_always_lock_free >
struct extra_operations;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
257
include/boost/atomic/detail/extra_ops_emulated.hpp
Normal file
257
include/boost/atomic/detail/extra_ops_emulated.hpp
Normal file
@@ -0,0 +1,257 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_ops_emulated.hpp
|
||||
*
|
||||
* This header contains emulated (lock-based) implementation of the extra atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/extra_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Emulated implementation of extra operations
//!
//! All read-modify-write methods serialize access to the storage with a scoped_lock
//! constructed from the storage address, and therefore must not be used on
//! inter-process atomics (enforced by the static_assert in every method).
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations_emulated :
    public Base
{
    using base_type = Base;
    using storage_type = typename base_type::storage_type;
    using scoped_lock = typename base_type::scoped_lock;

    //! Negates the stored value; returns the previous value
    static storage_type fetch_negate(storage_type volatile& storage, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        // Access is serialized by the lock below, so the volatile qualifier can be cast away
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        s = static_cast< storage_type >(-old_val);
        return old_val;
    }

    //! Negates the stored value; returns the new value
    static storage_type negate(storage_type volatile& storage, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type new_val = static_cast< storage_type >(-s);
        s = new_val;
        return new_val;
    }

    //! Adds \a v to the stored value; returns the new value
    static storage_type add(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type new_val = s;
        new_val += v;
        s = new_val;
        return new_val;
    }

    //! Subtracts \a v from the stored value; returns the new value
    static storage_type sub(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type new_val = s;
        new_val -= v;
        s = new_val;
        return new_val;
    }

    //! Bitwise AND of the stored value with \a v; returns the new value
    static storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type new_val = s;
        new_val &= v;
        s = new_val;
        return new_val;
    }

    //! Bitwise OR of the stored value with \a v; returns the new value
    static storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type new_val = s;
        new_val |= v;
        s = new_val;
        return new_val;
    }

    //! Bitwise XOR of the stored value with \a v; returns the new value
    static storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type new_val = s;
        new_val ^= v;
        s = new_val;
        return new_val;
    }

    //! Bitwise complement of the stored value; returns the previous value
    static storage_type fetch_complement(storage_type volatile& storage, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type old_val = s;
        s = static_cast< storage_type >(~old_val);
        return old_val;
    }

    //! Bitwise complement of the stored value; returns the new value
    static storage_type bitwise_complement(storage_type volatile& storage, memory_order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type& s = const_cast< storage_type& >(storage);
        scoped_lock lock(&storage);
        storage_type new_val = static_cast< storage_type >(~s);
        s = new_val;
        return new_val;
    }

    //! Adds \a v to the stored value; the result is discarded
    static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        base_type::fetch_add(storage, v, order);
    }

    //! Subtracts \a v from the stored value; the result is discarded
    static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        base_type::fetch_sub(storage, v, order);
    }

    //! Negates the stored value; the result is discarded
    static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        fetch_negate(storage, order);
    }

    //! Bitwise AND of the stored value with \a v; the result is discarded
    static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        base_type::fetch_and(storage, v, order);
    }

    //! Bitwise OR of the stored value with \a v; the result is discarded
    static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        base_type::fetch_or(storage, v, order);
    }

    //! Bitwise XOR of the stored value with \a v; the result is discarded
    static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        base_type::fetch_xor(storage, v, order);
    }

    //! Bitwise complement of the stored value; the result is discarded
    static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        fetch_complement(storage, order);
    }

    //! Adds \a v to the stored value; returns true if the result is non-zero
    static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        return !!add(storage, v, order);
    }

    //! Subtracts \a v from the stored value; returns true if the result is non-zero
    static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        return !!sub(storage, v, order);
    }

    //! Negates the stored value; returns true if the result is non-zero
    static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        return !!negate(storage, order);
    }

    //! Bitwise AND of the stored value with \a v; returns true if the result is non-zero
    static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        return !!bitwise_and(storage, v, order);
    }

    //! Bitwise OR of the stored value with \a v; returns true if the result is non-zero
    static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        return !!bitwise_or(storage, v, order);
    }

    //! Bitwise XOR of the stored value with \a v; returns true if the result is non-zero
    static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        return !!bitwise_xor(storage, v, order);
    }

    //! Bitwise complement of the stored value; returns true if the result is non-zero
    static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        return !!bitwise_complement(storage, order);
    }

    //! Sets the bit at \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
        storage_type old_val = base_type::fetch_or(storage, mask, order);
        return !!(old_val & mask);
    }

    //! Clears the bit at \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
        storage_type old_val = base_type::fetch_and(storage, ~mask, order);
        return !!(old_val & mask);
    }

    //! Flips the bit at \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
        storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
        storage_type old_val = base_type::fetch_xor(storage, mask, order);
        return !!(old_val & mask);
    }
};
|
||||
|
||||
// Select the emulated (lock-based) implementation when the base operations are not lock-free
// (the last template parameter of extra_operations is Base::is_always_lock_free)
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations< Base, Size, Signed, false > :
    public extra_operations_emulated< Base, Size, Signed >
{
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
|
||||
1132
include/boost/atomic/detail/extra_ops_gcc_aarch32.hpp
Normal file
1132
include/boost/atomic/detail/extra_ops_gcc_aarch32.hpp
Normal file
File diff suppressed because it is too large
Load Diff
1420
include/boost/atomic/detail/extra_ops_gcc_aarch64.hpp
Normal file
1420
include/boost/atomic/detail/extra_ops_gcc_aarch64.hpp
Normal file
File diff suppressed because it is too large
Load Diff
1119
include/boost/atomic/detail/extra_ops_gcc_arm.hpp
Normal file
1119
include/boost/atomic/detail/extra_ops_gcc_arm.hpp
Normal file
File diff suppressed because it is too large
Load Diff
916
include/boost/atomic/detail/extra_ops_gcc_ppc.hpp
Normal file
916
include/boost/atomic/detail/extra_ops_gcc_ppc.hpp
Normal file
@@ -0,0 +1,916 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2017-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_ops_gcc_ppc.hpp
|
||||
*
|
||||
* This header contains implementation of the extra atomic operations for PowerPC.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/extra_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/extra_ops_generic.hpp>
|
||||
#include <boost/atomic/detail/ops_gcc_ppc_common.hpp>
|
||||
#include <boost/atomic/detail/gcc_ppc_asm_common.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
template< typename Base >
|
||||
struct extra_operations_gcc_ppc_common :
|
||||
public Base
|
||||
{
|
||||
using base_type = Base;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
|
||||
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
base_type::fetch_negate(storage, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
base_type::fetch_complement(storage, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
return !!base_type::negate(storage, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
return !!base_type::add(storage, v, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
return !!base_type::sub(storage, v, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
return !!base_type::bitwise_and(storage, v, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
return !!base_type::bitwise_or(storage, v, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
return !!base_type::bitwise_xor(storage, v, order);
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
return !!base_type::bitwise_complement(storage, order);
|
||||
}
|
||||
};
|
||||
|
||||
//! PowerPC-specific extra operations; specialized below per storage size
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations_gcc_ppc;
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
|
||||
|
||||
template< typename Base, bool Signed >
|
||||
struct extra_operations_gcc_ppc< Base, 1u, Signed > :
|
||||
public extra_operations_generic< Base, 1u, Signed >
|
||||
{
|
||||
using base_type = extra_operations_generic< Base, 1u, Signed >;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"neg %1,%0\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
:
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return original;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"neg %1,%0\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
:
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"add %1,%0,%3\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"sub %1,%0,%3\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"and %1,%0,%3\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"or %1,%0,%3\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"xor %1,%0,%3\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"nor %1,%0,%0\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
:
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return original;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"lbarx %0,%y2\n\t"
|
||||
"nor %1,%0,%0\n\t"
|
||||
"stbcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
:
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
template< typename Base, bool Signed >
|
||||
struct extra_operations< Base, 1u, Signed, true > :
|
||||
public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 1u, Signed > >
|
||||
{
|
||||
};
|
||||
|
||||
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)

//! Extra operations for 16-bit storage, implemented with PowerPC lharx/sthcx.
//! (halfword-sized load-reserve/store-conditional). Every operation follows the same pattern:
//! emit the pre-operation fence for \c order, run a load-reserve/modify/store-conditional
//! loop that retries ("bne- 1b") until sthcx. succeeds, then emit the post-operation fence.
//! TSAN release/acquire annotations bracket each operation around the inline asm.
template< typename Base, bool Signed >
struct extra_operations_gcc_ppc< Base, 2u, Signed > :
    public extra_operations_generic< Base, 2u, Signed >
{
    using base_type = extra_operations_generic< Base, 2u, Signed >;
    using storage_type = typename base_type::storage_type;

    //! Atomically negates the stored value; returns the value observed *before* negation.
    static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "neg %1,%0\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return original;
    }

    //! Atomically negates the stored value; returns the *new* (negated) value.
    static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "neg %1,%0\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically adds \a v to the stored value; returns the *new* value (sum).
    static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "add %1,%0,%3\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically subtracts \a v from the stored value; returns the *new* value (difference).
    static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "sub %1,%0,%3\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically ANDs \a v into the stored value; returns the *new* value.
    static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "and %1,%0,%3\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically ORs \a v into the stored value; returns the *new* value.
    static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "or %1,%0,%3\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically XORs \a v into the stored value; returns the *new* value.
    static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "xor %1,%0,%3\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically complements the stored value (via "nor %1,%0,%0", i.e. bitwise NOT);
    //! returns the value observed *before* the operation.
    static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "nor %1,%0,%0\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return original;
    }

    //! Atomically complements the stored value; returns the *new* (complemented) value.
    static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lharx %0,%y2\n\t"
            "nor %1,%0,%0\n\t"
            "sthcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }
};

//! Dispatches extra_operations for 16-bit storage to the lharx/sthcx.-based implementation above.
//! Fix: this specialization was previously missing from this section, so 16-bit extra operations
//! fell back to the generic CAS-based implementation even when lharx/sthcx. are available. The
//! 8-bit, 32-bit and 64-bit sections each provide the analogous specialization.
template< typename Base, bool Signed >
struct extra_operations< Base, 2u, Signed, true > :
    public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 2u, Signed > >
{
};

#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
|
||||
|
||||
//! Extra operations for 32-bit storage, implemented with PowerPC lwarx/stwcx.
//! (word-sized load-reserve/store-conditional). Every operation follows the same pattern:
//! emit the pre-operation fence for \c order, run a load-reserve/modify/store-conditional
//! loop that retries ("bne- 1b") until stwcx. succeeds, then emit the post-operation fence.
//! TSAN release/acquire annotations bracket each operation around the inline asm.
//! Note: unlike the 1/2/8-byte sections, this specialization is not guarded by a feature
//! macro in the visible code.
template< typename Base, bool Signed >
struct extra_operations_gcc_ppc< Base, 4u, Signed > :
    public extra_operations_generic< Base, 4u, Signed >
{
    using base_type = extra_operations_generic< Base, 4u, Signed >;
    using storage_type = typename base_type::storage_type;

    //! Atomically negates the stored value; returns the value observed *before* negation.
    static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "neg %1,%0\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return original;
    }

    //! Atomically negates the stored value; returns the *new* (negated) value.
    static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "neg %1,%0\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically adds \a v to the stored value; returns the *new* value (sum).
    static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "add %1,%0,%3\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically subtracts \a v from the stored value; returns the *new* value (difference).
    static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "sub %1,%0,%3\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically ANDs \a v into the stored value; returns the *new* value.
    static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "and %1,%0,%3\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically ORs \a v into the stored value; returns the *new* value.
    static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "or %1,%0,%3\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically XORs \a v into the stored value; returns the *new* value.
    static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "xor %1,%0,%3\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            : "b" (v)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }

    //! Atomically complements the stored value (via "nor %1,%0,%0", i.e. bitwise NOT);
    //! returns the value observed *before* the operation.
    static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "nor %1,%0,%0\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return original;
    }

    //! Atomically complements the stored value; returns the *new* (complemented) value.
    static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
        core_arch_operations_gcc_ppc_base::fence_before(order);
        storage_type original, result;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
            "lwarx %0,%y2\n\t"
            "nor %1,%0,%0\n\t"
            "stwcx. %1,%y2\n\t"
            BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
            : "=&b" (original), "=&b" (result), "+Z" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
        );
        core_arch_operations_gcc_ppc_base::fence_after(order);
        BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
        return result;
    }
};

//! Dispatches extra_operations for 32-bit storage to the lwarx/stwcx.-based implementation above.
template< typename Base, bool Signed >
struct extra_operations< Base, 4u, Signed, true > :
    public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 4u, Signed > >
{
};
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
|
||||
|
||||
template< typename Base, bool Signed >
|
||||
struct extra_operations_gcc_ppc< Base, 8u, Signed > :
|
||||
public extra_operations_generic< Base, 8u, Signed >
|
||||
{
|
||||
using base_type = extra_operations_generic< Base, 8u, Signed >;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"neg %1,%0\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
:
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return original;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"neg %1,%0\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
:
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"add %1,%0,%3\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"sub %1,%0,%3\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"and %1,%0,%3\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"or %1,%0,%3\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"xor %1,%0,%3\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
: "b" (v)
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"nor %1,%0,%0\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
:
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return original;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_TSAN_RELEASE(&storage, order);
|
||||
core_arch_operations_gcc_ppc_base::fence_before(order);
|
||||
storage_type original, result;
|
||||
__asm__ __volatile__
|
||||
(
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
|
||||
"ldarx %0,%y2\n\t"
|
||||
"nor %1,%0,%0\n\t"
|
||||
"stdcx. %1,%y2\n\t"
|
||||
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
|
||||
: "=&b" (original), "=&b" (result), "+Z" (storage)
|
||||
:
|
||||
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
|
||||
);
|
||||
core_arch_operations_gcc_ppc_base::fence_after(order);
|
||||
BOOST_ATOMIC_DETAIL_TSAN_ACQUIRE(&storage, order);
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
template< typename Base, bool Signed >
|
||||
struct extra_operations< Base, 8u, Signed, true > :
|
||||
public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 8u, Signed > >
|
||||
{
|
||||
};
|
||||
|
||||
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_PPC_INCLUDED_
|
||||
1956
include/boost/atomic/detail/extra_ops_gcc_x86.hpp
Normal file
1956
include/boost/atomic/detail/extra_ops_gcc_x86.hpp
Normal file
File diff suppressed because it is too large
Load Diff
410
include/boost/atomic/detail/extra_ops_generic.hpp
Normal file
410
include/boost/atomic/detail/extra_ops_generic.hpp
Normal file
@@ -0,0 +1,410 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2015-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_ops_generic.hpp
|
||||
*
|
||||
* This header contains generic implementation of the extra atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/integral_conversions.hpp>
|
||||
#include <boost/atomic/detail/extra_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Generic implementation of extra operations
//!
//! This primary template is selected when the backend provides native fetch-ops
//! (fetch_add, fetch_and, etc.; i.e. \c Base::full_cas_based is \c false). Negation and
//! complement, which have no native fetch-op counterpart, are emulated with CAS loops.
//! Arithmetic is performed in \c emulated_storage_type (the integer type matching the
//! operand size) and the result is sign- or zero-extended back to \c storage_type,
//! depending on \c Signed, so that the stored representation stays canonical.
template< typename Base, std::size_t Size, bool Signed, bool = Base::full_cas_based >
struct extra_operations_generic :
    public Base
{
    using base_type = Base;
    using storage_type = typename base_type::storage_type;
    //! Integer type of the actual operand size; may be narrower than storage_type
    using emulated_storage_type = typename storage_traits< Size >::type;

    //! Atomically negates the value; returns the value prior to negation
    static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        storage_type old_val;
        // A non-atomic load suffices to seed the loop: if the value changes concurrently,
        // the CAS below fails and refreshes old_val
        atomics::detail::non_atomic_load(storage, old_val);
        while (!base_type::compare_exchange_weak(
            storage, old_val, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val)), order, memory_order_relaxed))
        {
        }
        return old_val;
    }

    //! Atomically negates the value; returns the negated (new) value
    static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) noexcept
    {
        storage_type old_val, new_val;
        atomics::detail::non_atomic_load(storage, old_val);
        do
        {
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val));
        }
        while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return new_val;
    }

    //! Atomically adds \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return base_type::fetch_add(storage, v, order) + v;
    }

    //! Atomically subtracts \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return base_type::fetch_sub(storage, v, order) - v;
    }

    //! Atomically applies bitwise AND with \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return base_type::fetch_and(storage, v, order) & v;
    }

    //! Atomically applies bitwise OR with \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return base_type::fetch_or(storage, v, order) | v;
    }

    //! Atomically applies bitwise XOR with \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return base_type::fetch_xor(storage, v, order) ^ v;
    }

    //! Atomically complements the value (XOR with all-ones of the operand size); returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        return base_type::fetch_xor(
            storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
    }

    //! Atomically complements the value; returns the complemented (new) value
    static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        const storage_type mask =
            atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u)));
        // Re-applying the mask to the fetched value reconstructs the new value without a second load
        return base_type::fetch_xor(storage, mask, order) ^ mask;
    }

    //! Atomically adds \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_add(storage, v, order);
    }

    //! Atomically subtracts \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_sub(storage, v, order);
    }

    //! Atomically negates the value, discarding the result
    static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        fetch_negate(storage, order);
    }

    //! Atomically applies bitwise AND with \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_and(storage, v, order);
    }

    //! Atomically applies bitwise OR with \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_or(storage, v, order);
    }

    //! Atomically applies bitwise XOR with \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_xor(storage, v, order);
    }

    //! Atomically complements the value, discarding the result
    static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        fetch_complement(storage, order);
    }

    //! Atomically adds \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        // The cast truncates to the operand size so padding bits in storage_type do not affect the test
        return !!static_cast< emulated_storage_type >(add(storage, v, order));
    }

    //! Atomically subtracts \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return !!static_cast< emulated_storage_type >(sub(storage, v, order));
    }

    //! Atomically negates the value; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) noexcept
    {
        return !!negate(storage, order);
    }

    //! Atomically applies bitwise AND with \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return !!bitwise_and(storage, v, order);
    }

    //! Atomically applies bitwise OR with \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return !!bitwise_or(storage, v, order);
    }

    //! Atomically applies bitwise XOR with \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return !!bitwise_xor(storage, v, order);
    }

    //! Atomically complements the value; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) noexcept
    {
        return !!static_cast< emulated_storage_type >(bitwise_complement(storage, order));
    }

    //! Atomically sets bit \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        const storage_type mask =
            atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
        storage_type old_val = base_type::fetch_or(storage, mask, order);
        return !!(old_val & mask);
    }

    //! Atomically clears bit \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        const storage_type mask =
            atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
        storage_type old_val = base_type::fetch_and(storage, ~mask, order);
        return !!(old_val & mask);
    }

    //! Atomically flips bit \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        const storage_type mask =
            atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
        storage_type old_val = base_type::fetch_xor(storage, mask, order);
        return !!(old_val & mask);
    }
};
|
||||
|
||||
//! Specialization for cases when the platform only natively supports CAS
//!
//! Selected when \c Base::full_cas_based is \c true, i.e. the backend's own fetch-ops
//! are themselves CAS loops. Implementing add/sub/and/or/xor as a single CAS loop here
//! avoids the double round-trip of "fetch-op (CAS loop) + recompute". As in the primary
//! template, arithmetic happens in \c emulated_storage_type and is extended back to
//! \c storage_type according to \c Signed.
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations_generic< Base, Size, Signed, true > :
    public Base
{
    using base_type = Base;
    using storage_type = typename base_type::storage_type;
    //! Integer type of the actual operand size; may be narrower than storage_type
    using emulated_storage_type = typename storage_traits< Size >::type;

    //! Atomically negates the value; returns the value prior to negation
    static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        storage_type old_val;
        // A non-atomic load suffices to seed the loop: if the value changes concurrently,
        // the CAS below fails and refreshes old_val
        atomics::detail::non_atomic_load(storage, old_val);
        while (!base_type::compare_exchange_weak(
            storage, old_val, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val)), order, memory_order_relaxed))
        {
        }
        return old_val;
    }

    //! Atomically negates the value; returns the negated (new) value
    static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) noexcept
    {
        storage_type old_val, new_val;
        atomics::detail::non_atomic_load(storage, old_val);
        do
        {
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val));
        }
        while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return new_val;
    }

    //! Atomically adds \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type old_val, new_val;
        atomics::detail::non_atomic_load(storage, old_val);
        do
        {
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val + v));
        }
        while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return new_val;
    }

    //! Atomically subtracts \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type old_val, new_val;
        atomics::detail::non_atomic_load(storage, old_val);
        do
        {
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val - v));
        }
        while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return new_val;
    }

    //! Atomically applies bitwise AND with \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type old_val, new_val;
        atomics::detail::non_atomic_load(storage, old_val);
        do
        {
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val & v));
        }
        while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return new_val;
    }

    //! Atomically applies bitwise OR with \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type old_val, new_val;
        atomics::detail::non_atomic_load(storage, old_val);
        do
        {
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val | v));
        }
        while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return new_val;
    }

    //! Atomically applies bitwise XOR with \a v; returns the resulting (new) value
    static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        storage_type old_val, new_val;
        atomics::detail::non_atomic_load(storage, old_val);
        do
        {
            new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val ^ v));
        }
        while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
        return new_val;
    }

    //! Atomically complements the value (XOR with all-ones of the operand size); returns the previous value
    static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        return base_type::fetch_xor(
            storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
    }

    //! Atomically complements the value; returns the complemented (new) value
    static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        // Delegates to the CAS-loop bitwise_xor above, which already returns the new value
        return bitwise_xor(
            storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
    }

    //! Atomically adds \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_add(storage, v, order);
    }

    //! Atomically subtracts \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_sub(storage, v, order);
    }

    //! Atomically negates the value, discarding the result
    static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) noexcept
    {
        fetch_negate(storage, order);
    }

    //! Atomically applies bitwise AND with \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_and(storage, v, order);
    }

    //! Atomically applies bitwise OR with \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_or(storage, v, order);
    }

    //! Atomically applies bitwise XOR with \a v, discarding the result
    static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        base_type::fetch_xor(storage, v, order);
    }

    //! Atomically complements the value, discarding the result
    static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) noexcept
    {
        fetch_complement(storage, order);
    }

    //! Atomically adds \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        // The cast truncates to the operand size so padding bits in storage_type do not affect the test
        return !!static_cast< emulated_storage_type >(add(storage, v, order));
    }

    //! Atomically subtracts \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return !!static_cast< emulated_storage_type >(sub(storage, v, order));
    }

    //! Atomically negates the value; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) noexcept
    {
        return !!negate(storage, order);
    }

    //! Atomically applies bitwise AND with \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return !!bitwise_and(storage, v, order);
    }

    //! Atomically applies bitwise OR with \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return !!bitwise_or(storage, v, order);
    }

    //! Atomically applies bitwise XOR with \a v; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) noexcept
    {
        return !!bitwise_xor(storage, v, order);
    }

    //! Atomically complements the value; returns \c true if the new value is non-zero
    static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) noexcept
    {
        return !!static_cast< emulated_storage_type >(bitwise_complement(storage, order));
    }

    //! Atomically sets bit \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        const storage_type mask =
            atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
        storage_type old_val = base_type::fetch_or(storage, mask, order);
        return !!(old_val & mask);
    }

    //! Atomically clears bit \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        const storage_type mask =
            atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
        storage_type old_val = base_type::fetch_and(storage, ~mask, order);
        return !!(old_val & mask);
    }

    //! Atomically flips bit \a bit_number; returns the previous state of that bit
    static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
    {
        const storage_type mask =
            atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
        storage_type old_val = base_type::fetch_xor(storage, mask, order);
        return !!(old_val & mask);
    }
};
|
||||
|
||||
// Default extra_operations template definition will be used unless specialized for a specific platform.
// It simply pulls in the generic implementation above; platform headers (e.g. the MSVC x86/ARM ones)
// provide partial specializations that override selected operations with native intrinsics.
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations< Base, Size, Signed, true > :
    public extra_operations_generic< Base, Size, Signed >
{
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
|
||||
108
include/boost/atomic/detail/extra_ops_msvc_arm.hpp
Normal file
108
include/boost/atomic/detail/extra_ops_msvc_arm.hpp
Normal file
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2017-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/extra_ops_msvc_arm.hpp
|
||||
*
|
||||
* This header contains implementation of the extra atomic operations for ARM.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/interlocked.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/extra_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/extra_ops_generic.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
#if defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
|
||||
|
||||
template< typename Base, std::size_t Size, bool Signed >
|
||||
struct extra_operations< Base, 4u, Signed, true > :
|
||||
public extra_operations_generic< Base, 4u, Signed >
|
||||
{
|
||||
using base_type = extra_operations_generic< Base, 4u, Signed >;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
|
||||
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
|
||||
{
|
||||
#if defined(BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED) && defined(BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE) && defined(BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE)
|
||||
bool result;
|
||||
switch (order)
|
||||
{
|
||||
case memory_order_relaxed:
|
||||
result = !!BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED(&storage, bit_number);
|
||||
break;
|
||||
case memory_order_consume:
|
||||
case memory_order_acquire:
|
||||
result = !!BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE(&storage, bit_number);
|
||||
break;
|
||||
case memory_order_release:
|
||||
result = !!BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE(&storage, bit_number);
|
||||
break;
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
default:
|
||||
result = !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
#else
|
||||
return !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
|
||||
#endif
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) noexcept
|
||||
{
|
||||
#if defined(BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED) && defined(BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE) && defined(BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE)
|
||||
bool result;
|
||||
switch (order)
|
||||
{
|
||||
case memory_order_relaxed:
|
||||
result = !!BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED(&storage, bit_number);
|
||||
break;
|
||||
case memory_order_consume:
|
||||
case memory_order_acquire:
|
||||
result = !!BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE(&storage, bit_number);
|
||||
break;
|
||||
case memory_order_release:
|
||||
result = !!BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE(&storage, bit_number);
|
||||
break;
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
default:
|
||||
result = !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
#else
|
||||
return !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
#endif // defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
1328
include/boost/atomic/detail/extra_ops_msvc_x86.hpp
Normal file
1328
include/boost/atomic/detail/extra_ops_msvc_x86.hpp
Normal file
File diff suppressed because it is too large
Load Diff
41
include/boost/atomic/detail/fence_arch_operations.hpp
Normal file
41
include/boost/atomic/detail/fence_arch_operations.hpp
Normal file
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_operations.hpp
|
||||
*
|
||||
* This header defines architecture-specific fence atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/platform.hpp>
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
|
||||
#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/fence_arch_ops_)
|
||||
#else
|
||||
#include <boost/atomic/detail/fence_operations_emulated.hpp>
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
using fence_arch_operations = fence_operations_emulated;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
|
||||
59
include/boost/atomic/detail/fence_arch_ops_gcc_aarch32.hpp
Normal file
59
include/boost/atomic/detail/fence_arch_ops_gcc_aarch32.hpp
Normal file
@@ -0,0 +1,59 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_gcc_aarch32.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for AArch32
|
||||
struct fence_arch_operations_gcc_aarch32
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
{
|
||||
if (order == memory_order_consume || order == memory_order_acquire)
|
||||
__asm__ __volatile__ ("dmb ishld\n\t" ::: "memory");
|
||||
else
|
||||
__asm__ __volatile__ ("dmb ish\n\t" ::: "memory");
|
||||
}
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_gcc_aarch32;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
|
||||
58
include/boost/atomic/detail/fence_arch_ops_gcc_aarch64.hpp
Normal file
58
include/boost/atomic/detail/fence_arch_ops_gcc_aarch64.hpp
Normal file
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_gcc_aarch64.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for AArch64
|
||||
struct fence_arch_operations_gcc_aarch64
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
{
|
||||
if (order == memory_order_consume || order == memory_order_acquire)
|
||||
__asm__ __volatile__ ("dmb ishld\n\t" ::: "memory");
|
||||
else
|
||||
__asm__ __volatile__ ("dmb ish\n\t" ::: "memory");
|
||||
}
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_gcc_aarch64;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
|
||||
53
include/boost/atomic/detail/fence_arch_ops_gcc_alpha.hpp
Normal file
53
include/boost/atomic/detail/fence_arch_ops_gcc_alpha.hpp
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_gcc_alpha.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for Alpha
|
||||
struct fence_arch_operations_gcc_alpha
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__asm__ __volatile__ ("mb" ::: "memory");
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_gcc_alpha;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
|
||||
90
include/boost/atomic/detail/fence_arch_ops_gcc_arm.hpp
Normal file
90
include/boost/atomic/detail/fence_arch_ops_gcc_arm.hpp
Normal file
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_gcc_arm.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <cstdint>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
#include <boost/atomic/detail/gcc_arm_asm_common.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for legacy ARM
struct fence_arch_operations_gcc_arm
{
    //! Emits a hardware memory barrier matching \a order (no-op for relaxed)
    static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
    {
        if (order != memory_order_relaxed)
            hardware_full_fence();
    }

    //! Emits a compiler-only barrier matching \a order (no-op for relaxed)
    static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
    {
        if (order != memory_order_relaxed)
            __asm__ __volatile__ ("" ::: "memory");
    }

    //! Unconditionally emits a full hardware memory barrier
    static BOOST_FORCEINLINE void hardware_full_fence() noexcept
    {
        // A memory barrier is effected using a "co-processor 15" instruction,
        // though a separate assembler mnemonic is available for it in v7.

#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
        // Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the "dmb" instruction such as "ish" or "#11".
        // As a workaround we have to inject encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See
        // ARM Architecture Reference Manual, A8.8.43. Since we cannot detect binutils version at compile time, we'll have to always use this hack.
        __asm__ __volatile__
        (
#if defined(__thumb2__)
            ".short 0xF3BF, 0x8F5B\n\t" // dmb ish
#else
            ".word 0xF57FF05B\n\t" // dmb ish
#endif
            :
            :
            : "memory"
        );
#else
        // Pre-v7 path: issue the barrier through the CP15 "drain write buffer" operation.
        // The scratch register is constrained to the low registers ("l") for Thumb compatibility;
        // the ASM_START/END macros handle the ARM/Thumb mode switching around the mcr instruction.
        std::uint32_t tmp;
        __asm__ __volatile__
        (
            BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
            "mcr p15, 0, r0, c7, c10, 5\n\t"
            BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
            : "=&l" (tmp)
            :
            : "memory"
        );
#endif
    }
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_gcc_arm;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
|
||||
68
include/boost/atomic/detail/fence_arch_ops_gcc_ppc.hpp
Normal file
68
include/boost/atomic/detail/fence_arch_ops_gcc_ppc.hpp
Normal file
@@ -0,0 +1,68 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_gcc_ppc.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for PowerPC
|
||||
struct fence_arch_operations_gcc_ppc
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
{
|
||||
#if defined(__powerpc64__) || defined(__PPC64__)
|
||||
if (order != memory_order_seq_cst)
|
||||
__asm__ __volatile__ ("lwsync" ::: "memory");
|
||||
else
|
||||
__asm__ __volatile__ ("sync" ::: "memory");
|
||||
#else
|
||||
__asm__ __volatile__ ("sync" ::: "memory");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
{
|
||||
#if defined(__ibmxl__) || defined(__IBMCPP__)
|
||||
__fence();
|
||||
#else
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_gcc_ppc;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
|
||||
70
include/boost/atomic/detail/fence_arch_ops_gcc_sparc.hpp
Normal file
70
include/boost/atomic/detail/fence_arch_ops_gcc_sparc.hpp
Normal file
@@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_gcc_sparc.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for SPARC
|
||||
struct fence_arch_operations_gcc_sparc
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
switch (order)
|
||||
{
|
||||
case memory_order_release:
|
||||
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
|
||||
break;
|
||||
case memory_order_consume:
|
||||
case memory_order_acquire:
|
||||
__asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
|
||||
break;
|
||||
case memory_order_acq_rel:
|
||||
__asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
|
||||
break;
|
||||
case memory_order_seq_cst:
|
||||
__asm__ __volatile__ ("membar #Sync" ::: "memory");
|
||||
break;
|
||||
case memory_order_relaxed:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_gcc_sparc;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
|
||||
69
include/boost/atomic/detail/fence_arch_ops_gcc_x86.hpp
Normal file
69
include/boost/atomic/detail/fence_arch_ops_gcc_x86.hpp
Normal file
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_gcc_x86.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for x86
|
||||
struct fence_arch_operations_gcc_x86
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order == memory_order_seq_cst)
|
||||
{
|
||||
// We could generate mfence for a seq_cst fence here, but a dummy lock-prefixed instruction is enough
|
||||
// and is faster than mfence on most modern x86 CPUs (as of 2020).
|
||||
// Note that we want to apply the atomic operation on any location so that:
|
||||
// - It is not shared with other threads. A variable on the stack suits this well.
|
||||
// - It is likely in cache. Being close to the top of the stack fits this well.
|
||||
// - It does not alias existing data on the stack, so that we don't introduce a false data dependency.
|
||||
// See some performance data here: https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
|
||||
// Unfortunately, to make tools like valgrind happy, we have to initialize the dummy, which is
|
||||
// otherwise not needed.
|
||||
unsigned char dummy = 0u;
|
||||
__asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) : : "memory");
|
||||
}
|
||||
else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_release))) != 0u)
|
||||
{
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
}
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_gcc_x86;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
|
||||
66
include/boost/atomic/detail/fence_arch_ops_msvc_arm.hpp
Normal file
66
include/boost/atomic/detail/fence_arch_ops_msvc_arm.hpp
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_msvc_arm.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/ops_msvc_common.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
extern "C" void __dmb(unsigned int);
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(__dmb)
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for ARM
|
||||
struct fence_arch_operations_msvc_arm
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
|
||||
if (order != memory_order_relaxed)
|
||||
hardware_full_fence();
|
||||
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void hardware_full_fence() noexcept
|
||||
{
|
||||
__dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later
|
||||
}
|
||||
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_msvc_arm;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
|
||||
66
include/boost/atomic/detail/fence_arch_ops_msvc_x86.hpp
Normal file
66
include/boost/atomic/detail/fence_arch_ops_msvc_x86.hpp
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_arch_ops_msvc_x86.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_arch_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
|
||||
|
||||
#include <cstdint>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/interlocked.hpp>
|
||||
#include <boost/atomic/detail/ops_msvc_common.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations for x86
|
||||
struct fence_arch_operations_msvc_x86
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order == memory_order_seq_cst)
|
||||
{
|
||||
// See the comment in fence_ops_gcc_x86.hpp as to why we're not using mfence here.
|
||||
// We're not using __faststorefence() here because it generates an atomic operation
|
||||
// on [rsp]/[esp] location, which may alias valid data and cause false data dependency.
|
||||
std::uint32_t dummy;
|
||||
BOOST_ATOMIC_INTERLOCKED_INCREMENT(&dummy);
|
||||
}
|
||||
else if (order != memory_order_relaxed)
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
|
||||
}
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
|
||||
}
|
||||
};
|
||||
|
||||
using fence_arch_operations = fence_arch_operations_msvc_x86;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
|
||||
41
include/boost/atomic/detail/fence_operations.hpp
Normal file
41
include/boost/atomic/detail/fence_operations.hpp
Normal file
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_operations.hpp
|
||||
*
|
||||
* This header defines fence atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/platform.hpp>
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
|
||||
#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/fence_ops_)
|
||||
#else
|
||||
#include <boost/atomic/detail/fence_arch_operations.hpp>
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
using fence_operations = fence_arch_operations;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
|
||||
50
include/boost/atomic/detail/fence_operations_emulated.hpp
Normal file
50
include/boost/atomic/detail/fence_operations_emulated.hpp
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_operations_emulated.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/lock_pool.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations based on lock pool
struct fence_operations_emulated
{
    //! Thread fence; delegates to the lock pool. The requested ordering is ignored
    //! because the lock pool implementation always provides full ordering.
    static BOOST_FORCEINLINE void thread_fence(memory_order) noexcept
    {
        atomics::detail::lock_pool::thread_fence();
    }

    //! Signal fence; delegates to the lock pool. The requested ordering is ignored.
    static BOOST_FORCEINLINE void signal_fence(memory_order) noexcept
    {
        atomics::detail::lock_pool::signal_fence();
    }
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_
|
||||
75
include/boost/atomic/detail/fence_ops_gcc_atomic.hpp
Normal file
75
include/boost/atomic/detail/fence_ops_gcc_atomic.hpp
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_ops_gcc_atomic.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/fence_arch_operations.hpp>
|
||||
#include <boost/atomic/detail/gcc_atomic_memory_order_utils.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__INTEL_COMPILER)
|
||||
// This is used to suppress warning #32013 described in gcc_atomic_memory_order_utils.hpp
|
||||
// for Intel Compiler.
|
||||
// In debug builds the compiler does not inline any functions, so basically
|
||||
// every atomic function call results in this warning. I don't know any other
|
||||
// way to selectively disable just this one warning.
|
||||
#pragma system_header
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations based on gcc __atomic* intrinsics
|
||||
struct fence_operations_gcc_atomic
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_TSAN) && (defined(__x86_64__) || defined(__i386__))
|
||||
if (order != memory_order_seq_cst)
|
||||
{
|
||||
__atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
|
||||
}
|
||||
else
|
||||
{
|
||||
// gcc, clang, icc and probably other compilers generate mfence for a seq_cst fence,
|
||||
// while a dummy lock-prefixed instruction would be enough and faster. See the comment in fence_ops_gcc_x86.hpp.
|
||||
fence_arch_operations::thread_fence(order);
|
||||
}
|
||||
#else
|
||||
__atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
|
||||
#endif
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
__atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
|
||||
}
|
||||
};
|
||||
|
||||
using fence_operations = fence_operations_gcc_atomic;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
|
||||
53
include/boost/atomic/detail/fence_ops_gcc_sync.hpp
Normal file
53
include/boost/atomic/detail/fence_ops_gcc_sync.hpp
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_ops_gcc_sync.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations based on gcc __sync* intrinsics
|
||||
struct fence_operations_gcc_sync
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__sync_synchronize();
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
}
|
||||
};
|
||||
|
||||
using fence_operations = fence_operations_gcc_sync;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_
|
||||
64
include/boost/atomic/detail/fence_ops_linux_arm.hpp
Normal file
64
include/boost/atomic/detail/fence_ops_linux_arm.hpp
Normal file
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009, 2011 Helge Bahmann
|
||||
* Copyright (c) 2009 Phil Endecott
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Linux-specific code by Phil Endecott
|
||||
* Copyright (c) 2014-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_ops_linux_arm.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations based on Linux-specific system routines
|
||||
struct fence_operations_linux_arm
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
hardware_full_fence();
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
__asm__ __volatile__ ("" ::: "memory");
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void hardware_full_fence() noexcept
|
||||
{
|
||||
// See the comment in core_ops_linux_arm.hpp regarding the function pointer below
|
||||
using kernel_dmb_t = void (void);
|
||||
((kernel_dmb_t*)0xffff0fa0)();
|
||||
}
|
||||
};
|
||||
|
||||
using fence_operations = fence_operations_linux_arm;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_
|
||||
67
include/boost/atomic/detail/fence_ops_windows.hpp
Normal file
67
include/boost/atomic/detail/fence_ops_windows.hpp
Normal file
@@ -0,0 +1,67 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fence_ops_windows.hpp
|
||||
*
|
||||
* This header contains implementation of the \c fence_operations struct.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_
|
||||
|
||||
#include <cstdint>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/interlocked.hpp>
|
||||
#include <boost/atomic/detail/ops_msvc_common.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Fence operations based on Windows-specific system calls or intrinsics
|
||||
struct fence_operations_windows
|
||||
{
|
||||
static BOOST_FORCEINLINE void thread_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
|
||||
if (order == memory_order_seq_cst)
|
||||
hardware_full_fence();
|
||||
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
|
||||
}
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void signal_fence(memory_order order) noexcept
|
||||
{
|
||||
if (order != memory_order_relaxed)
|
||||
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void hardware_full_fence() noexcept
|
||||
{
|
||||
std::uint32_t tmp;
|
||||
BOOST_ATOMIC_INTERLOCKED_INCREMENT(&tmp);
|
||||
}
|
||||
};
|
||||
|
||||
using fence_operations = fence_operations_windows;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_
|
||||
229
include/boost/atomic/detail/float_sizes.hpp
Normal file
229
include/boost/atomic/detail/float_sizes.hpp
Normal file
@@ -0,0 +1,229 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018-2023 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/float_sizes.hpp
|
||||
*
|
||||
* This header defines macros for testing buitin floating point type sizes
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// Detect value sizes of the different floating point types. The value sizes may be less than the corresponding type sizes
|
||||
// if the type contains padding bits. This is typical e.g. with x87 80-bit extended double types, which are often represented as 96 or 128-bit types.
|
||||
// See: https://en.wikipedia.org/wiki/IEEE_754
|
||||
// For Intel x87 extended double see: https://en.wikipedia.org/wiki/Extended_precision#x86_Architecture_Extended_Precision_Format
|
||||
// For IBM extended double (a.k.a. double-double) see: https://en.wikipedia.org/wiki/Long_double#Implementations, https://gcc.gnu.org/wiki/Ieee128PowerPC
|
||||
|
||||
#if defined(__FLT_RADIX__) && defined(__FLT_MANT_DIG__) && defined(__FLT_MAX_EXP__) && \
|
||||
defined(__DBL_MANT_DIG__) && defined(__DBL_MAX_EXP__) && defined(__LDBL_MANT_DIG__) && defined(__LDBL_MAX_EXP__)
|
||||
|
||||
#if (__FLT_RADIX__ == 2)
|
||||
|
||||
#if (__FLT_MANT_DIG__ == 11) && (__FLT_MAX_EXP__ == 16) // IEEE 754 binary16
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 2
|
||||
#elif (__FLT_MANT_DIG__ == 24) && (__FLT_MAX_EXP__ == 128) // IEEE 754 binary32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 4
|
||||
#elif (__FLT_MANT_DIG__ == 53) && (__FLT_MAX_EXP__ == 1024) // IEEE 754 binary64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 8
|
||||
#elif (__FLT_MANT_DIG__ == 64 || __FLT_MANT_DIG__ == 53 || __FLT_MANT_DIG__ == 24) && (__FLT_MAX_EXP__ == 16384) // x87 extended double, with full 64-bit significand or reduced to 53 or 24 bits
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 10
|
||||
#elif (__FLT_MANT_DIG__ == 106) && (__FLT_MAX_EXP__ == 1024) // IBM extended double
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
|
||||
#elif (__FLT_MANT_DIG__ == 113) && (__FLT_MAX_EXP__ == 16384) // IEEE 754 binary128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
|
||||
#elif (__FLT_MANT_DIG__ == 237) && (__FLT_MAX_EXP__ == 262144) // IEEE 754 binary256
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 32
|
||||
#endif
|
||||
|
||||
#if (__DBL_MANT_DIG__ == 11) && (__DBL_MAX_EXP__ == 16) // IEEE 754 binary16
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 2
|
||||
#elif (__DBL_MANT_DIG__ == 24) && (__DBL_MAX_EXP__ == 128) // IEEE 754 binary32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 4
|
||||
#elif (__DBL_MANT_DIG__ == 53) && (__DBL_MAX_EXP__ == 1024) // IEEE 754 binary64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 8
|
||||
#elif (__DBL_MANT_DIG__ == 64 || __DBL_MANT_DIG__ == 53 || __DBL_MANT_DIG__ == 24) && (__DBL_MAX_EXP__ == 16384) // x87 extended double, with full 64-bit significand or reduced to 53 or 24 bits
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 10
|
||||
#elif (__DBL_MANT_DIG__ == 106) && (__DBL_MAX_EXP__ == 1024) // IBM extended double
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
|
||||
#elif (__DBL_MANT_DIG__ == 113) && (__DBL_MAX_EXP__ == 16384) // IEEE 754 binary128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
|
||||
#elif (__DBL_MANT_DIG__ == 237) && (__DBL_MAX_EXP__ == 262144) // IEEE 754 binary256
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 32
|
||||
#endif
|
||||
|
||||
#if (__LDBL_MANT_DIG__ == 11) && (__LDBL_MAX_EXP__ == 16) // IEEE 754 binary16
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 2
|
||||
#elif (__LDBL_MANT_DIG__ == 24) && (__LDBL_MAX_EXP__ == 128) // IEEE 754 binary32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 4
|
||||
#elif (__LDBL_MANT_DIG__ == 53) && (__LDBL_MAX_EXP__ == 1024) // IEEE 754 binary64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 8
|
||||
#elif (__LDBL_MANT_DIG__ == 64 || __LDBL_MANT_DIG__ == 53 || __LDBL_MANT_DIG__ == 24) && (__LDBL_MAX_EXP__ == 16384) // x87 extended double, with full 64-bit significand or reduced to 53 or 24 bits
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 10
|
||||
#elif (__LDBL_MANT_DIG__ == 106) && (__LDBL_MAX_EXP__ == 1024) // IBM extended double
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
|
||||
#elif (__LDBL_MANT_DIG__ == 113) && (__LDBL_MAX_EXP__ == 16384) // IEEE 754 binary128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
|
||||
#elif (__LDBL_MANT_DIG__ == 237) && (__LDBL_MAX_EXP__ == 262144) // IEEE 754 binary256
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 32
|
||||
#endif
|
||||
|
||||
#elif (__FLT_RADIX__ == 10)
|
||||
|
||||
#if (__FLT_MANT_DIG__ == 7) && (__FLT_MAX_EXP__ == 97) // IEEE 754 decimal32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 4
|
||||
#elif (__FLT_MANT_DIG__ == 16) && (__FLT_MAX_EXP__ == 385) // IEEE 754 decimal64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 8
|
||||
#elif (__FLT_MANT_DIG__ == 34) && (__FLT_MAX_EXP__ == 6145) // IEEE 754 decimal128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
|
||||
#endif
|
||||
|
||||
#if (__DBL_MANT_DIG__ == 7) && (__DBL_MAX_EXP__ == 97) // IEEE 754 decimal32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 4
|
||||
#elif (__DBL_MANT_DIG__ == 16) && (__DBL_MAX_EXP__ == 385) // IEEE 754 decimal64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 8
|
||||
#elif (__DBL_MANT_DIG__ == 34) && (__DBL_MAX_EXP__ == 6145) // IEEE 754 decimal128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
|
||||
#endif
|
||||
|
||||
#if (__LDBL_MANT_DIG__ == 7) && (__LDBL_MAX_EXP__ == 97) // IEEE 754 decimal32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 4
|
||||
#elif (__LDBL_MANT_DIG__ == 16) && (__LDBL_MAX_EXP__ == 385) // IEEE 754 decimal64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 8
|
||||
#elif (__LDBL_MANT_DIG__ == 34) && (__LDBL_MAX_EXP__ == 6145) // IEEE 754 decimal128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#else // defined(__FLT_RADIX__) ...
|
||||
|
||||
#include <cfloat>
|
||||
|
||||
#if (FLT_RADIX == 2)
|
||||
|
||||
#if (FLT_MANT_DIG == 11) && (FLT_MAX_EXP == 16) // IEEE 754 binary16
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 2
|
||||
#elif (FLT_MANT_DIG == 24) && (FLT_MAX_EXP == 128) // IEEE 754 binary32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 4
|
||||
#elif (FLT_MANT_DIG == 53) && (FLT_MAX_EXP == 1024) // IEEE 754 binary64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 8
|
||||
#elif (FLT_MANT_DIG == 64 || FLT_MANT_DIG == 53 || FLT_MANT_DIG == 24) && (FLT_MAX_EXP == 16384) // x87 extended double, with full 64-bit significand or reduced to 53 or 24 bits
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 10
|
||||
#elif (FLT_MANT_DIG == 106) && (FLT_MAX_EXP == 1024) // IBM extended double
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
|
||||
#elif (FLT_MANT_DIG == 113) && (FLT_MAX_EXP == 16384) // IEEE 754 binary128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
|
||||
#elif (FLT_MANT_DIG == 237) && (FLT_MAX_EXP == 262144) // IEEE 754 binary256
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 32
|
||||
#endif
|
||||
|
||||
#if (DBL_MANT_DIG == 11) && (DBL_MAX_EXP == 16) // IEEE 754 binary16
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 2
|
||||
#elif (DBL_MANT_DIG == 24) && (DBL_MAX_EXP == 128) // IEEE 754 binary32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 4
|
||||
#elif (DBL_MANT_DIG == 53) && (DBL_MAX_EXP == 1024) // IEEE 754 binary64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 8
|
||||
#elif (DBL_MANT_DIG == 64 || DBL_MANT_DIG == 53 || DBL_MANT_DIG == 24) && (DBL_MAX_EXP == 16384) // x87 extended double, with full 64-bit significand or reduced to 53 or 24 bits
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 10
|
||||
#elif (DBL_MANT_DIG == 106) && (DBL_MAX_EXP == 1024) // IBM extended double
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
|
||||
#elif (DBL_MANT_DIG == 113) && (DBL_MAX_EXP == 16384) // IEEE 754 binary128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
|
||||
#elif (DBL_MANT_DIG == 237) && (DBL_MAX_EXP == 262144) // IEEE 754 binary256
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 32
|
||||
#endif
|
||||
|
||||
#if (LDBL_MANT_DIG == 11) && (LDBL_MAX_EXP == 16) // IEEE 754 binary16
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 2
|
||||
#elif (LDBL_MANT_DIG == 24) && (LDBL_MAX_EXP == 128) // IEEE 754 binary32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 4
|
||||
#elif (LDBL_MANT_DIG == 53) && (LDBL_MAX_EXP == 1024) // IEEE 754 binary64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 8
|
||||
#elif (LDBL_MANT_DIG == 64 || LDBL_MANT_DIG == 53 || LDBL_MANT_DIG == 24) && (LDBL_MAX_EXP == 16384) // x87 extended double, with full 64-bit significand or reduced to 53 or 24 bits
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 10
|
||||
#elif (LDBL_MANT_DIG == 106) && (LDBL_MAX_EXP == 1024) // IBM extended double
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
|
||||
#elif (LDBL_MANT_DIG == 113) && (LDBL_MAX_EXP == 16384) // IEEE 754 binary128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
|
||||
#elif (LDBL_MANT_DIG == 237) && (LDBL_MAX_EXP == 262144) // IEEE 754 binary256
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 32
|
||||
#endif
|
||||
|
||||
#elif (FLT_RADIX == 10)
|
||||
|
||||
#if (FLT_MANT_DIG == 7) && (FLT_MAX_EXP == 97) // IEEE 754 decimal32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 4
|
||||
#elif (FLT_MANT_DIG == 16) && (FLT_MAX_EXP == 385) // IEEE 754 decimal64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 8
|
||||
#elif (FLT_MANT_DIG == 34) && (FLT_MAX_EXP == 6145) // IEEE 754 decimal128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
|
||||
#endif
|
||||
|
||||
#if (DBL_MANT_DIG == 7) && (DBL_MAX_EXP == 97) // IEEE 754 decimal32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 4
|
||||
#elif (DBL_MANT_DIG == 16) && (DBL_MAX_EXP == 385) // IEEE 754 decimal64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 8
|
||||
#elif (DBL_MANT_DIG == 34) && (DBL_MAX_EXP == 6145) // IEEE 754 decimal128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
|
||||
#endif
|
||||
|
||||
#if (LDBL_MANT_DIG == 7) && (LDBL_MAX_EXP == 97) // IEEE 754 decimal32
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 4
|
||||
#elif (LDBL_MANT_DIG == 16) && (LDBL_MAX_EXP == 385) // IEEE 754 decimal64
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 8
|
||||
#elif (LDBL_MANT_DIG == 34) && (LDBL_MAX_EXP == 6145) // IEEE 754 decimal128
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#endif // defined(__FLT_RADIX__) ...
|
||||
|
||||
// GCC and compatible compilers define internal macros with builtin type traits
#if defined(__SIZEOF_FLOAT__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT __SIZEOF_FLOAT__
#endif
#if defined(__SIZEOF_DOUBLE__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE __SIZEOF_DOUBLE__
#endif
#if defined(__SIZEOF_LONG_DOUBLE__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE __SIZEOF_LONG_DOUBLE__
#endif

#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)

// Rounds the value representation size (in bytes) up to the next power of 2, up to 32.
// This approximates sizeof() of the type, which may include trailing padding bytes.
#define BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(x)\
((x) == 1u ? 1u : ((x) == 2u ? 2u : ((x) <= 4u ? 4u : ((x) <= 8u ? 8u : ((x) <= 16u ? 16u : ((x) <= 32u ? 32u : (x)))))))

// Make our best guess. These sizes may not be accurate, but they are good enough to estimate the size of the storage required to hold these types.
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
#endif

#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)

// If any of the value representation sizes or type sizes could not be determined, the target is not supported
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) ||\
    !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) ||\
    !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
#error Boost.Atomic: Failed to determine builtin floating point type sizes, the target platform is not supported. Please, report to the developers (patches are welcome).
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
|
||||
24
include/boost/atomic/detail/footer.hpp
Normal file
24
include/boost/atomic/detail/footer.hpp
Normal file
@@ -0,0 +1,24 @@
|
||||
/*
|
||||
* Copyright Andrey Semashev 2020.
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*/
|
||||
|
||||
// Restores the compiler warning state. This header presumably pairs with
// <boost/atomic/detail/header.hpp>, which pushes the corresponding state;
// library headers include header.hpp at the top and this file at the bottom.
#if !defined(BOOST_ATOMIC_ENABLE_WARNINGS)

#if defined(BOOST_MSVC)

#pragma warning(pop)

#elif defined(BOOST_GCC) && BOOST_GCC >= 40600
// #pragma GCC diagnostic push/pop is only supported since gcc 4.6

#pragma GCC diagnostic pop

#elif defined(BOOST_CLANG)

#pragma clang diagnostic pop

#endif

#endif // !defined(BOOST_ATOMIC_ENABLE_WARNINGS)
|
||||
28
include/boost/atomic/detail/fp_operations.hpp
Normal file
28
include/boost/atomic/detail/fp_operations.hpp
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fp_operations.hpp
|
||||
*
|
||||
* This header defines floating point atomic operations, including the generic version.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FP_OPERATIONS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FP_OPERATIONS_HPP_INCLUDED_

// The generic (lock-free CAS based) and emulated (lock based) implementations
// are always available as fallbacks
#include <boost/atomic/detail/fp_ops_generic.hpp>
#include <boost/atomic/detail/fp_ops_emulated.hpp>

// Pull in the platform-specific floating point backend, unless the generic
// backend was selected by the configuration
#if !defined(BOOST_ATOMIC_DETAIL_FP_BACKEND_GENERIC)
#include BOOST_ATOMIC_DETAIL_FP_BACKEND_HEADER(boost/atomic/detail/fp_ops_)
#endif

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

#endif // BOOST_ATOMIC_DETAIL_FP_OPERATIONS_HPP_INCLUDED_
|
||||
38
include/boost/atomic/detail/fp_operations_fwd.hpp
Normal file
38
include/boost/atomic/detail/fp_operations_fwd.hpp
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fp_operations_fwd.hpp
|
||||
*
|
||||
* This header contains forward declaration of the \c fp_operations template.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FP_OPERATIONS_FWD_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FP_OPERATIONS_FWD_HPP_INCLUDED_

#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

//! Floating point atomic operations.
//!
//! \tparam Base  Core operations backend; must provide \c storage_type and \c is_always_lock_free.
//! \tparam Value Floating point value type exposed to the user.
//! \tparam Size  Storage size in bytes; defaults to the size of the backend storage.
//!
//! The unnamed bool parameter equals \c Base::is_always_lock_free and selects
//! the implementation specialization.
template< typename Base, typename Value, std::size_t Size = sizeof(typename Base::storage_type), bool = Base::is_always_lock_free >
struct fp_operations;

} // namespace detail
} // namespace atomics
} // namespace boost

#include <boost/atomic/detail/footer.hpp>

#endif // BOOST_ATOMIC_DETAIL_FP_OPERATIONS_FWD_HPP_INCLUDED_
|
||||
77
include/boost/atomic/detail/fp_ops_emulated.hpp
Normal file
77
include/boost/atomic/detail/fp_ops_emulated.hpp
Normal file
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fp_ops_emulated.hpp
|
||||
*
|
||||
* This header contains emulated (lock-based) implementation of the floating point atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FP_OPS_EMULATED_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FP_OPS_EMULATED_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
|
||||
#include <boost/atomic/detail/fp_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Emulated implementation of floating point operations
|
||||
template< typename Base, typename Value, std::size_t Size >
|
||||
struct fp_operations_emulated :
|
||||
public Base
|
||||
{
|
||||
using base_type = Base;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
using value_type = Value;
|
||||
using scoped_lock = typename base_type::scoped_lock;
|
||||
|
||||
static value_type fetch_add(storage_type volatile& storage, value_type v, memory_order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
storage_type& s = const_cast< storage_type& >(storage);
|
||||
scoped_lock lock(&storage);
|
||||
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
|
||||
value_type new_val = old_val + v;
|
||||
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static value_type fetch_sub(storage_type volatile& storage, value_type v, memory_order) noexcept
|
||||
{
|
||||
static_assert(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
|
||||
storage_type& s = const_cast< storage_type& >(storage);
|
||||
scoped_lock lock(&storage);
|
||||
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
|
||||
value_type new_val = old_val - v;
|
||||
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
return old_val;
|
||||
}
|
||||
};
|
||||
|
||||
template< typename Base, typename Value, std::size_t Size >
|
||||
struct fp_operations< Base, Value, Size, false > :
|
||||
public fp_operations_emulated< Base, Value, Size >
|
||||
{
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FP_OPS_EMULATED_HPP_INCLUDED_
|
||||
86
include/boost/atomic/detail/fp_ops_generic.hpp
Normal file
86
include/boost/atomic/detail/fp_ops_generic.hpp
Normal file
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/fp_ops_generic.hpp
|
||||
*
|
||||
* This header contains generic implementation of the floating point atomic operations.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FP_OPS_GENERIC_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FP_OPS_GENERIC_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
|
||||
#include <boost/atomic/detail/storage_traits.hpp>
|
||||
#include <boost/atomic/detail/fp_operations_fwd.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Generic implementation of floating point operations
|
||||
template< typename Base, typename Value, std::size_t Size >
|
||||
struct fp_operations_generic :
|
||||
public Base
|
||||
{
|
||||
using base_type = Base;
|
||||
using storage_type = typename base_type::storage_type;
|
||||
using value_type = Value;
|
||||
|
||||
static BOOST_FORCEINLINE value_type fetch_add(storage_type volatile& storage, value_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_storage, new_storage;
|
||||
value_type old_val, new_val;
|
||||
atomics::detail::non_atomic_load(storage, old_storage);
|
||||
do
|
||||
{
|
||||
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
|
||||
new_val = old_val + v;
|
||||
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
}
|
||||
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
|
||||
return old_val;
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE value_type fetch_sub(storage_type volatile& storage, value_type v, memory_order order) noexcept
|
||||
{
|
||||
storage_type old_storage, new_storage;
|
||||
value_type old_val, new_val;
|
||||
atomics::detail::non_atomic_load(storage, old_storage);
|
||||
do
|
||||
{
|
||||
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
|
||||
new_val = old_val - v;
|
||||
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
|
||||
}
|
||||
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
|
||||
return old_val;
|
||||
}
|
||||
};
|
||||
|
||||
// Default fp_operations template definition will be used unless specialized for a specific platform
|
||||
template< typename Base, typename Value, std::size_t Size >
|
||||
struct fp_operations< Base, Value, Size, true > :
|
||||
public fp_operations_generic< Base, Value, Size >
|
||||
{
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FP_OPS_GENERIC_HPP_INCLUDED_
|
||||
246
include/boost/atomic/detail/futex.hpp
Normal file
246
include/boost/atomic/detail/futex.hpp
Normal file
@@ -0,0 +1,246 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/futex.hpp
|
||||
*
|
||||
* This header defines wrappers around futex syscall.
|
||||
*
|
||||
* http://man7.org/linux/man-pages/man2/futex.2.html
|
||||
* https://man.openbsd.org/futex
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_FUTEX_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_FUTEX_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// Detect the futex interface available on the target OS and define
// BOOST_ATOMIC_DETAIL_SYS_FUTEX (syscall number) or
// BOOST_ATOMIC_DETAIL_OPENBSD_FUTEX (libc wrapper) accordingly.
#if defined(__linux__)

#include <sys/syscall.h>

#if defined(SYS_futex)
#define BOOST_ATOMIC_DETAIL_SYS_FUTEX SYS_futex
#elif defined(SYS_futex_time64)
// On some 32-bit targets (e.g. riscv32) SYS_futex is not defined and instead SYS_futex_time64 is implemented,
// which is equivalent to SYS_futex but uses 64-bit time_t.
#define BOOST_ATOMIC_DETAIL_SYS_FUTEX SYS_futex_time64
#define BOOST_ATOMIC_DETAIL_FUTEX_TIME64
#elif defined(__NR_futex)
// Some Android NDKs (Google NDK and older Crystax.NET NDK versions) don't define SYS_futex.
#define BOOST_ATOMIC_DETAIL_SYS_FUTEX __NR_futex
#endif

#elif defined(__OpenBSD__)

// OpenBSD provides futex(2) function wrapper since OpenBSD 6.2 (https://man.openbsd.org/OpenBSD-6.2/futex.2).
// It has also removed syscall(2) interface:
// https://github.com/openbsd/src/commit/cafeb892b121ee89c39c2b940e8ccd6950f50009

#include <sys/param.h>
#include <cerrno>

#if OpenBSD >= 201711
#define BOOST_ATOMIC_DETAIL_OPENBSD_FUTEX
#endif // OpenBSD >= 201711

#elif defined(__NETBSD__) || defined(__NetBSD__)

#include <sys/syscall.h>

#if defined(SYS___futex)
// NetBSD defines SYS___futex, which has slightly different parameters. Basically, it has decoupled timeout and val2 parameters:
// int __futex(int *addr1, int op, int val1, const struct timespec *timeout, int *addr2, int val2, int val3);
// https://ftp.netbsd.org/pub/NetBSD/NetBSD-current/src/sys/sys/syscall.h
// http://bxr.su/NetBSD/sys/kern/sys_futex.c
#define BOOST_ATOMIC_DETAIL_SYS_FUTEX SYS___futex
#define BOOST_ATOMIC_DETAIL_NETBSD_FUTEX
#endif // defined(SYS___futex)

#endif
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_SYS_FUTEX) || defined(BOOST_ATOMIC_DETAIL_OPENBSD_FUTEX)
|
||||
|
||||
#if defined(__linux__)
|
||||
#include <linux/futex.h>
|
||||
#else
|
||||
#include <sys/futex.h>
|
||||
#endif
|
||||
#include <time.h> // timespec
|
||||
#include <cstdint>
|
||||
#include <boost/atomic/detail/intptr.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#define BOOST_ATOMIC_DETAIL_HAS_FUTEX
|
||||
|
||||
// Note: On Android, futex.h is lacking many definitions, but the actual Linux kernel supports the API in full.
#if defined(FUTEX_WAIT_BITSET)
#define BOOST_ATOMIC_DETAIL_FUTEX_WAIT_BITSET FUTEX_WAIT_BITSET
#elif defined(__ANDROID__)
// Hardcoded value from the Linux uapi headers; see the note above
#define BOOST_ATOMIC_DETAIL_FUTEX_WAIT_BITSET 9
#endif

#if defined(FUTEX_PRIVATE_FLAG)
#define BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG FUTEX_PRIVATE_FLAG
#elif defined(__ANDROID__)
// Hardcoded value from the Linux uapi headers; see the note above
#define BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG 128
#else
// Defined as 0 so that OR-ing the flag into the opcode has no effect
#define BOOST_ATOMIC_DETAIL_FUTEX_PRIVATE_FLAG 0
#endif

#if defined(FUTEX_CLOCK_REALTIME)
#define BOOST_ATOMIC_DETAIL_FUTEX_CLOCK_REALTIME FUTEX_CLOCK_REALTIME
#elif defined(__ANDROID__)
// Hardcoded value from the Linux uapi headers; see the note above
#define BOOST_ATOMIC_DETAIL_FUTEX_CLOCK_REALTIME 256
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_FUTEX_TIME64)

//! An equivalent of `timespec` that uses 64-bit members when the userland `timespec` is 32-bit
struct futex_timespec
{
    std::int64_t tv_sec;
    std::int64_t tv_nsec;

    futex_timespec() = default;
    //! Converts from the userland `timespec`, widening the members to 64 bits
    explicit futex_timespec(::timespec ts) noexcept :
        tv_sec(ts.tv_sec), tv_nsec(ts.tv_nsec)
    {}
};

#else // defined(BOOST_ATOMIC_DETAIL_FUTEX_TIME64)

using futex_timespec = ::timespec;

#endif // defined(BOOST_ATOMIC_DETAIL_FUTEX_TIME64)

//! Invokes an operation on the futex.
//! This overload passes a timeout pointer in the fourth argument slot.
//! Returns the syscall result; on error returns -1 and sets errno (but see futex_wait for old OpenBSD).
BOOST_FORCEINLINE int futex_invoke(void* addr1, int op, unsigned int val1, const futex_timespec* timeout = nullptr, void* addr2 = nullptr, unsigned int val3 = 0u) noexcept
{
#if defined(BOOST_ATOMIC_DETAIL_OPENBSD_FUTEX)
    // OpenBSD has no syscall(2); use the futex(2) libc wrapper (no val3 parameter)
    return ::futex
    (
        static_cast< volatile std::uint32_t* >(addr1),
        op,
        static_cast< int >(val1),
        timeout,
        static_cast< volatile std::uint32_t* >(addr2)
    );
#elif defined(BOOST_ATOMIC_DETAIL_NETBSD_FUTEX)
    // Pass 0 in val2. NetBSD's SYS___futex has separate timeout and val2 parameters.
    return ::syscall(BOOST_ATOMIC_DETAIL_SYS_FUTEX, addr1, op, val1, timeout, addr2, 0u, val3);
#else
    return ::syscall(BOOST_ATOMIC_DETAIL_SYS_FUTEX, addr1, op, val1, timeout, addr2, val3);
#endif
}

//! Invokes an operation on the futex.
//! This overload passes an integer val2 in the slot that otherwise carries the timeout
//! pointer (as done by e.g. FUTEX_REQUEUE, which reuses that argument for a count).
BOOST_FORCEINLINE int futex_invoke(void* addr1, int op, unsigned int val1, unsigned int val2, void* addr2 = nullptr, unsigned int val3 = 0u) noexcept
{
#if defined(BOOST_ATOMIC_DETAIL_OPENBSD_FUTEX)
    // The libc wrapper only has a pointer-typed slot for this argument; smuggle val2 through it
    return ::futex
    (
        static_cast< volatile std::uint32_t* >(addr1),
        op,
        static_cast< int >(val1),
        reinterpret_cast< const futex_timespec* >(static_cast< atomics::detail::uintptr_t >(val2)),
        static_cast< volatile std::uint32_t* >(addr2)
    );
#elif defined(BOOST_ATOMIC_DETAIL_NETBSD_FUTEX)
    // Pass nullptr in timeout. NetBSD's SYS___futex has a dedicated val2 parameter.
    return ::syscall(BOOST_ATOMIC_DETAIL_SYS_FUTEX, addr1, op, val1, static_cast< void* >(nullptr), addr2, val2, val3);
#else
    return ::syscall(BOOST_ATOMIC_DETAIL_SYS_FUTEX, addr1, op, val1, static_cast< atomics::detail::uintptr_t >(val2), addr2, val3);
#endif
}

//! Checks that the value \c pval is \c expected and blocks
BOOST_FORCEINLINE int futex_wait(void* pval, unsigned int expected, int flags) noexcept
{
    int res = futex_invoke(pval, FUTEX_WAIT | flags, expected);
#if defined(OpenBSD) && (OpenBSD < 202111)
    // In older OpenBSD versions, futex(2) returned error code directly instead of setting errno and returning -1.
    // This was fixed in OpenBSD 7.0 (https://github.com/openbsd/src/commit/3288ea8fbfe504db25b57dd18b664a1aa377e4bf).
    // This primarily affects FUTEX_WAIT. For FUTEX_WAKE and FUTEX_REQUEUE the returned value may be positive
    // on successful completion of the call and there seem to be no errors that can be returned. Other functions
    // are not supported on OpenBSD 7.0 and older.
    if (res > 0)
    {
        errno = res;
        res = -1;
    }
#endif // defined(OpenBSD) && (OpenBSD < 202111)
    return res;
}

//! Checks that the value \c pval is \c expected and blocks until timeout (relative)
BOOST_FORCEINLINE int futex_wait_for(void* pval, unsigned int expected, futex_timespec const& timeout, int flags) noexcept
{
    int res = futex_invoke(pval, FUTEX_WAIT | flags, expected, &timeout);
#if defined(OpenBSD) && (OpenBSD < 202111)
    // See the comment in futex_wait
    if (res > 0)
    {
        errno = res;
        res = -1;
    }
#endif // defined(OpenBSD) && (OpenBSD < 202111)
    return res;
}

#if defined(BOOST_ATOMIC_DETAIL_FUTEX_WAIT_BITSET)

//! Checks that the value \c pval is \c expected and blocks until timeout (absolute)
BOOST_FORCEINLINE int futex_wait_until(void* pval, unsigned int expected, futex_timespec const& timeout, int flags) noexcept
{
    // The all-ones bitset makes FUTEX_WAIT_BITSET match any waker; unlike FUTEX_WAIT, the timeout is absolute
    return futex_invoke(pval, BOOST_ATOMIC_DETAIL_FUTEX_WAIT_BITSET | flags, expected, &timeout, nullptr, ~static_cast< unsigned int >(0u));
}

#endif // defined(BOOST_ATOMIC_DETAIL_FUTEX_WAIT_BITSET)

//! Wakes the specified number of threads waiting on the futex
BOOST_FORCEINLINE int futex_signal(void* pval, int flags, unsigned int count = 1u) noexcept
{
    return futex_invoke(pval, FUTEX_WAKE | flags, count);
}

//! Wakes all threads waiting on the futex
BOOST_FORCEINLINE int futex_broadcast(void* pval, int flags) noexcept
{
    // (~0u) >> 1 == INT_MAX, i.e. effectively "all waiters"
    return futex_signal(pval, flags, (~static_cast< unsigned int >(0u)) >> 1u);
}

//! Wakes the wake_count threads waiting on the futex pval1 and requeues up to requeue_count of the blocked threads onto another futex pval2
BOOST_FORCEINLINE int futex_requeue
(
    void* pval1,
    void* pval2,
    int flags,
    unsigned int wake_count = 1u,
    unsigned int requeue_count = (~static_cast< unsigned int >(0u)) >> 1u
) noexcept
{
    return futex_invoke(pval1, FUTEX_REQUEUE | flags, wake_count, requeue_count, pval2);
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // defined(BOOST_ATOMIC_DETAIL_SYS_FUTEX) || defined(BOOST_ATOMIC_DETAIL_OPENBSD_FUTEX)
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_FUTEX_HPP_INCLUDED_
|
||||
79
include/boost/atomic/detail/gcc_arm_asm_common.hpp
Normal file
79
include/boost/atomic/detail/gcc_arm_asm_common.hpp
Normal file
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014, 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/gcc_arm_asm_common.hpp
|
||||
*
|
||||
* This header contains basic utilities for gcc asm-based ARM backend.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_GCC_ARM_ASM_COMMON_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ARM_ASM_COMMON_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// A memory barrier is effected using a "co-processor 15" instruction,
|
||||
// though a separate assembler mnemonic is available for it in v7.
|
||||
//
|
||||
// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
|
||||
// doesn't include all instructions and in particular it doesn't include the co-processor
|
||||
// instruction used for the memory barrier or the load-locked/store-conditional
|
||||
// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
|
||||
// asm blocks with code to temporarily change to ARM mode.
|
||||
//
|
||||
// You can only change between ARM and Thumb modes when branching using the bx instruction.
|
||||
// bx takes an address specified in a register. The least significant bit of the address
|
||||
// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
|
||||
// A temporary register is needed for the address and is passed as an argument to these
|
||||
// macros. It must be one of the "low" registers accessible to Thumb code, specified
|
||||
// using the "l" attribute in the asm statement.
|
||||
//
|
||||
// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
|
||||
// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
|
||||
// "Thumb 2" mode, but its architecture manual is no longer available, referring to v7.)
|
||||
// So in v7 we don't need to change to ARM mode; we can write "universal
|
||||
// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
|
||||
// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
|
||||
// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
|
||||
// so they can always be present.
|
||||
|
||||
// A note about memory_order_consume. Technically, this architecture allows to avoid
|
||||
// unnecessary memory barrier after consume load since it supports data dependency ordering.
|
||||
// However, some compiler optimizations may break a seemingly valid code relying on data
|
||||
// dependency tracking by injecting bogus branches to aid out of order execution.
|
||||
// This may happen not only in Boost.Atomic code but also in user's code, which we have no
|
||||
// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
|
||||
// For this reason we promote memory_order_consume to memory_order_acquire.
|
||||
|
||||
#if defined(__thumb__) && !defined(__thumb2__)
// "Thumb 1" mode: temporarily switch to ARM mode around the asm block, since the required
// instructions are not available in the Thumb 1 encoding (see the notes above).
// The 8:/9: local labels mark the ARM-mode entry point and the Thumb-mode return point.
#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n\t" "bx " #TMPREG "\n\t" ".arm\n\t" ".align 4\n\t" "8:\n\t"
// The "+ 1" sets the least significant bit of the branch target, which tells bx to switch back to Thumb mode
#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n\t" "bx " #TMPREG "\n\t" ".thumb\n\t" ".align 2\n\t" "9:\n\t"
// The temporary register must be one of the "low" registers accessible to Thumb code
#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
#else
// Indicate that start/end macros are empty and the tmpreg is not needed
#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_UNUSED
#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
#endif

// Helpers for referencing the low/high halves of a doubleword asm operand, accounting for byte order.
// The "%H" operand modifier presumably selects the high-numbered register of the pair — see GCC ARM
// operand modifier documentation.
#if defined(BOOST_ATOMIC_DETAIL_ARM_LITTLE_ENDIAN)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(arg) "%" BOOST_STRINGIZE(arg)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(arg) "%H" BOOST_STRINGIZE(arg)
#else
#define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_LO(arg) "%H" BOOST_STRINGIZE(arg)
#define BOOST_ATOMIC_DETAIL_ARM_ASM_ARG_HI(arg) "%" BOOST_STRINGIZE(arg)
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_GCC_ARM_ASM_COMMON_HPP_INCLUDED_
|
||||
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/gcc_atomic_memory_order_utils.hpp
|
||||
*
|
||||
* This header contains utilities for working with gcc atomic memory order constants.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_MEMORY_ORDER_UTILS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_MEMORY_ORDER_UTILS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
/*!
|
||||
* The function converts \c boost::memory_order values to the compiler-specific constants.
|
||||
*
|
||||
* NOTE: The intention is that the function is optimized away by the compiler, and the
|
||||
* compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
|
||||
* work in this case because the standard atomics interface require memory ordering
|
||||
* constants to be passed as function arguments, at which point they stop being constexpr.
|
||||
* However, it is crucial that the compiler sees constants and not runtime values,
|
||||
* because otherwise it just ignores the ordering value and always uses seq_cst.
|
||||
* This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
|
||||
* gcc 4.8.2. Intel Compiler issues a warning in this case:
|
||||
*
|
||||
* warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
|
||||
*
|
||||
* while gcc acts silently.
|
||||
*
|
||||
* To mitigate the problem ALL functions, including the atomic<> members must be
|
||||
* declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
|
||||
 * all functions are called with constant orderings and call intrinsics properly.
|
||||
*
|
||||
* Unfortunately, this still doesn't work in debug mode as the compiler doesn't
|
||||
* propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
|
||||
 * all atomic operations will be executed with seq_cst semantics.
|
||||
*/
|
||||
BOOST_FORCEINLINE constexpr int convert_memory_order_to_gcc(memory_order order) noexcept
{
    // Each comparison below is mutually exclusive, so the order of the tests does not affect
    // the resulting mapping; anything unrecognized degrades to the strongest ordering.
    return (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL :
        (order == memory_order_release ? __ATOMIC_RELEASE :
        (order == memory_order_acquire ? __ATOMIC_ACQUIRE :
        (order == memory_order_consume ? __ATOMIC_CONSUME :
        (order == memory_order_relaxed ? __ATOMIC_RELAXED : __ATOMIC_SEQ_CST)))));
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_GCC_ATOMIC_MEMORY_ORDER_UTILS_HPP_INCLUDED_
|
||||
33
include/boost/atomic/detail/gcc_ppc_asm_common.hpp
Normal file
33
include/boost/atomic/detail/gcc_ppc_asm_common.hpp
Normal file
@@ -0,0 +1,33 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2021 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/gcc_ppc_asm_common.hpp
|
||||
*
|
||||
* This header contains basic utilities for gcc asm-based PowerPC backend.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_GCC_PPC_ASM_COMMON_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_PPC_ASM_COMMON_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if !defined(_AIX)
|
||||
#define BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL(label) label ":\n\t"
|
||||
#define BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP(insn, label, offset) insn " " label "\n\t"
|
||||
#else
|
||||
// Standard assembler tool (as) on AIX does not support numeric jump labels, so we have to use offsets instead.
|
||||
// https://github.com/boostorg/atomic/pull/50
|
||||
#define BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL(label)
|
||||
#define BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP(insn, label, offset) insn " $" offset "\n\t"
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_GCC_PPC_ASM_COMMON_HPP_INCLUDED_
|
||||
51
include/boost/atomic/detail/has_posix_clock_traits.hpp
Normal file
51
include/boost/atomic/detail/has_posix_clock_traits.hpp
Normal file
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
 * \file atomic/detail/has_posix_clock_traits.hpp
|
||||
*
|
||||
* This header contains utilities for working with \c posix_clock_traits.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_HAS_POSIX_CLOCK_TRAITS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_HAS_POSIX_CLOCK_TRAITS_HPP_INCLUDED_
|
||||
|
||||
#include <sys/types.h> // clockid_t
|
||||
#include <type_traits>
|
||||
#include <boost/atomic/posix_clock_traits_fwd.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
//! Implementation detail of \c has_posix_clock_traits.
//!
//! Detection via SFINAE: the template \c check_posix_clock_traits_clock_id overload is only
//! viable when <tt>posix_clock_traits< T >::clock_id</tt> is a valid constant expression
//! usable as a \c clockid_t non-type template argument (i.e. a \c posix_clock_traits
//! specialization exists for \c T). Otherwise overload resolution falls back to the
//! ellipsis overload, which yields \c std::false_type.
template< typename Clock >
struct has_posix_clock_traits_impl
{
    template< typename T, clockid_t = posix_clock_traits< T >::clock_id >
    static std::true_type check_posix_clock_traits_clock_id(T*);
    static std::false_type check_posix_clock_traits_clock_id(...);

    //! \c std::true_type when \c posix_clock_traits< Clock >::clock_id exists, \c std::false_type otherwise
    using type = decltype(has_posix_clock_traits_impl< Clock >::check_posix_clock_traits_clock_id(static_cast< Clock* >(nullptr)));
};

//! Checks if there exists a specialization of \c posix_clock_traits for \c Clock
template< typename Clock >
using has_posix_clock_traits = typename has_posix_clock_traits_impl< Clock >::type;
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_HAS_POSIX_CLOCK_TRAITS_HPP_INCLUDED_
|
||||
72
include/boost/atomic/detail/header.hpp
Normal file
72
include/boost/atomic/detail/header.hpp
Normal file
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Copyright Andrey Semashev 2020.
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*/
|
||||
|
||||
#include <boost/config.hpp>
|
||||
|
||||
#if !defined(BOOST_ATOMIC_ENABLE_WARNINGS)
|
||||
|
||||
#if defined(BOOST_MSVC)
|
||||
|
||||
#pragma warning(push, 3)
|
||||
// 'm_A' : class 'A' needs to have dll-interface to be used by clients of class 'B'
|
||||
#pragma warning(disable: 4251)
|
||||
// non dll-interface class 'A' used as base for dll-interface class 'B'
|
||||
#pragma warning(disable: 4275)
|
||||
// 'this' : used in base member initializer list
|
||||
#pragma warning(disable: 4355)
|
||||
// 'int' : forcing value to bool 'true' or 'false' (performance warning)
|
||||
#pragma warning(disable: 4800)
|
||||
// unreferenced formal parameter
|
||||
#pragma warning(disable: 4100)
|
||||
// conditional expression is constant
|
||||
#pragma warning(disable: 4127)
|
||||
// default constructor could not be generated
|
||||
#pragma warning(disable: 4510)
|
||||
// copy constructor could not be generated
|
||||
#pragma warning(disable: 4511)
|
||||
// assignment operator could not be generated
|
||||
#pragma warning(disable: 4512)
|
||||
// function marked as __forceinline not inlined
|
||||
#pragma warning(disable: 4714)
|
||||
// decorated name length exceeded, name was truncated
|
||||
#pragma warning(disable: 4503)
|
||||
// declaration of 'A' hides previous local declaration
|
||||
#pragma warning(disable: 4456)
|
||||
// declaration of 'A' hides global declaration
|
||||
#pragma warning(disable: 4459)
|
||||
// 'X': This function or variable may be unsafe. Consider using Y instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details.
|
||||
#pragma warning(disable: 4996)
|
||||
// 'A' : multiple assignment operators specified
|
||||
#pragma warning(disable: 4522)
|
||||
// unary minus operator applied to unsigned type, result still unsigned
|
||||
#pragma warning(disable: 4146)
|
||||
// frame pointer register 'ebx' modified by inline assembly code
|
||||
#pragma warning(disable: 4731)
|
||||
// alignment is sensitive to packing
|
||||
#pragma warning(disable: 4121)
|
||||
// 'struct_name' : structure was padded due to __declspec(align())
|
||||
#pragma warning(disable: 4324)
|
||||
|
||||
#elif defined(BOOST_GCC) && BOOST_GCC >= 40600
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
// unused parameter 'arg'
|
||||
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
||||
// missing initializer for member var
|
||||
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
|
||||
|
||||
#elif defined(BOOST_CLANG)
|
||||
|
||||
#pragma clang diagnostic push
|
||||
// unused parameter 'arg'
|
||||
#pragma clang diagnostic ignored "-Wunused-parameter"
|
||||
// missing initializer for member var
|
||||
#pragma clang diagnostic ignored "-Wmissing-field-initializers"
|
||||
|
||||
#endif
|
||||
|
||||
#endif // !defined(BOOST_ATOMIC_ENABLE_WARNINGS)
|
||||
158
include/boost/atomic/detail/int_sizes.hpp
Normal file
158
include/boost/atomic/detail/int_sizes.hpp
Normal file
@@ -0,0 +1,158 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/int_sizes.hpp
|
||||
*
|
||||
 * This header defines macros for testing builtin integer type sizes
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// GCC and compatible compilers define internal macros with builtin type traits
|
||||
#if defined(__SIZEOF_SHORT__)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT __SIZEOF_SHORT__
|
||||
#endif
|
||||
#if defined(__SIZEOF_INT__)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT __SIZEOF_INT__
|
||||
#endif
|
||||
#if defined(__SIZEOF_LONG__)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG __SIZEOF_LONG__
|
||||
#endif
|
||||
#if defined(__SIZEOF_LONG_LONG__)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG __SIZEOF_LONG_LONG__
|
||||
#endif
|
||||
#if defined(__SIZEOF_WCHAR_T__)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T __SIZEOF_WCHAR_T__
|
||||
#endif
|
||||
#if defined(__SIZEOF_POINTER__)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER __SIZEOF_POINTER__
|
||||
#elif defined(_MSC_VER)
|
||||
#if defined(_M_AMD64) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_IA64)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 8
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 4
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
|
||||
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) ||\
|
||||
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER)
|
||||
|
||||
// Try to deduce sizes from limits
|
||||
#include <limits.h>
|
||||
#include <cstdint>
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT)
|
||||
#if (USHRT_MAX == 0xff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 1
|
||||
#elif (USHRT_MAX == 0xffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 2
|
||||
#elif (USHRT_MAX == 0xffffffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 4
|
||||
#elif (USHRT_MAX == UINT64_C(0xffffffffffffffff))
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_SHORT 8
|
||||
#endif
|
||||
#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT)
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT)
|
||||
#if (UINT_MAX == 0xff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 1
|
||||
#elif (UINT_MAX == 0xffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 2
|
||||
#elif (UINT_MAX == 0xffffffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 4
|
||||
#elif (UINT_MAX == UINT64_C(0xffffffffffffffff))
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_INT 8
|
||||
#endif
|
||||
#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT)
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG)
|
||||
#if (ULONG_MAX == 0xff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 1
|
||||
#elif (ULONG_MAX == 0xffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 2
|
||||
#elif (ULONG_MAX == 0xffffffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 4
|
||||
#elif (ULONG_MAX == UINT64_C(0xffffffffffffffff))
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG 8
|
||||
#endif
|
||||
#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG)
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)
|
||||
#if defined(__hpux) // HP-UX's value of ULONG_LONG_MAX is unusable in preprocessor expressions
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
|
||||
#else
|
||||
|
||||
// The list of the non-standard macros (the ones except ULLONG_MAX) is taken from boost/cstdint.hpp
|
||||
#if defined(ULLONG_MAX)
|
||||
#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULLONG_MAX
|
||||
#elif defined(ULONG_LONG_MAX)
|
||||
#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONG_LONG_MAX
|
||||
#elif defined(ULONGLONG_MAX)
|
||||
#define BOOST_ATOMIC_DETAIL_ULLONG_MAX ULONGLONG_MAX
|
||||
#elif defined(_LLONG_MAX) // strangely enough, this one seems to be holding the limit for the unsigned integer
|
||||
#define BOOST_ATOMIC_DETAIL_ULLONG_MAX _LLONG_MAX
|
||||
#endif
|
||||
|
||||
#if (BOOST_ATOMIC_DETAIL_ULLONG_MAX == 0xff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 1
|
||||
#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX == 0xffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 2
|
||||
#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX == 0xffffffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 4
|
||||
#elif (BOOST_ATOMIC_DETAIL_ULLONG_MAX == UINT64_C(0xffffffffffffffff))
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_LLONG 8
|
||||
#endif
|
||||
|
||||
#endif // defined(__hpux)
|
||||
#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG)
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && defined(UINTPTR_MAX)
|
||||
#if (UINTPTR_MAX == 0xffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 2
|
||||
#elif (UINTPTR_MAX == 0xffffffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 4
|
||||
#elif (UINTPTR_MAX == UINT64_C(0xffffffffffffffff))
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 8
|
||||
#endif
|
||||
#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_POINTER) && defined(UINTPTR_MAX)
|
||||
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)
|
||||
|
||||
#include <wchar.h>
|
||||
|
||||
#if defined(_MSC_VER) && (_MSC_VER <= 1310 || defined(UNDER_CE) && _MSC_VER <= 1500)
|
||||
// MSVC 7.1 and MSVC 8 (arm) define WCHAR_MAX to a value not suitable for constant expressions
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2
|
||||
#elif (WCHAR_MAX == 0xff) || (WCHAR_MAX == 0x7f)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 1
|
||||
#elif (WCHAR_MAX == 0xffff) || (WCHAR_MAX == 0x7fff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2
|
||||
#elif (WCHAR_MAX == 0xffffffff) || (WCHAR_MAX == 0x7fffffff)
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 4
|
||||
#elif (WCHAR_MAX == UINT64_C(0xffffffffffffffff)) || (WCHAR_MAX == INT64_C(0x7fffffffffffffff))
|
||||
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 8
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
|
||||
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) ||\
|
||||
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)
|
||||
#error Boost.Atomic: Failed to determine builtin integer sizes, the target platform is not supported. Please, report to the developers (patches are welcome).
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
|
||||
108
include/boost/atomic/detail/integral_conversions.hpp
Normal file
108
include/boost/atomic/detail/integral_conversions.hpp
Normal file
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2018-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/integral_conversions.hpp
|
||||
*
|
||||
* This header defines sign/zero extension and truncation utilities for Boost.Atomic. The tools assume two's complement signed integer representation.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_INTEGRAL_CONVERSIONS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_INTEGRAL_CONVERSIONS_HPP_INCLUDED_
|
||||
|
||||
#include <type_traits>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/bitwise_cast.hpp>
|
||||
#include <boost/atomic/detail/type_traits/is_signed.hpp>
|
||||
#include <boost/atomic/detail/type_traits/make_signed.hpp>
|
||||
#include <boost/atomic/detail/type_traits/make_unsigned.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output zero_extend_impl(Input input, std::true_type) noexcept
|
||||
{
|
||||
// Note: If we are casting with truncation or to the same-sized output, don't cause signed integer overflow by this chain of conversions
|
||||
return atomics::detail::bitwise_cast< Output >(static_cast< typename atomics::detail::make_unsigned< Output >::type >(
|
||||
static_cast< typename atomics::detail::make_unsigned< Input >::type >(input)));
|
||||
}
|
||||
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output zero_extend_impl(Input input, std::false_type) noexcept
|
||||
{
|
||||
return static_cast< Output >(static_cast< typename atomics::detail::make_unsigned< Input >::type >(input));
|
||||
}
|
||||
|
||||
//! Zero-extends or truncates (wraps) input operand to fit in the output type
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output zero_extend(Input input) noexcept
|
||||
{
|
||||
return atomics::detail::zero_extend_impl< Output >(input, std::integral_constant< bool, atomics::detail::is_signed< Output >::value >());
|
||||
}
|
||||
|
||||
//! Truncates (wraps) input operand to fit in the output type
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output integral_truncate(Input input) noexcept
|
||||
{
|
||||
// zero_extend does the truncation
|
||||
return atomics::detail::zero_extend< Output >(input);
|
||||
}
|
||||
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output sign_extend_impl(Input input, std::true_type) noexcept
|
||||
{
|
||||
return atomics::detail::integral_truncate< Output >(input);
|
||||
}
|
||||
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output sign_extend_impl(Input input, std::false_type) noexcept
|
||||
{
|
||||
return static_cast< Output >(atomics::detail::bitwise_cast< typename atomics::detail::make_signed< Input >::type >(input));
|
||||
}
|
||||
|
||||
//! Sign-extends or truncates (wraps) input operand to fit in the output type
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output sign_extend(Input input) noexcept
|
||||
{
|
||||
return atomics::detail::sign_extend_impl< Output >(input, std::integral_constant< bool, sizeof(Output) <= sizeof(Input) >());
|
||||
}
|
||||
|
||||
//! Sign-extends or truncates (wraps) input operand to fit in the output type
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output integral_extend(Input input, std::true_type) noexcept
|
||||
{
|
||||
return atomics::detail::sign_extend< Output >(input);
|
||||
}
|
||||
|
||||
//! Zero-extends or truncates (wraps) input operand to fit in the output type
|
||||
template< typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output integral_extend(Input input, std::false_type) noexcept
|
||||
{
|
||||
return atomics::detail::zero_extend< Output >(input);
|
||||
}
|
||||
|
||||
//! Sign- or zero-extends or truncates (wraps) input operand to fit in the output type
|
||||
template< bool Signed, typename Output, typename Input >
|
||||
BOOST_FORCEINLINE Output integral_extend(Input input) noexcept
|
||||
{
|
||||
return atomics::detail::integral_extend< Output >(input, std::integral_constant< bool, Signed >());
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_INTEGRAL_CONVERSIONS_HPP_INCLUDED_
|
||||
585
include/boost/atomic/detail/interlocked.hpp
Normal file
585
include/boost/atomic/detail/interlocked.hpp
Normal file
@@ -0,0 +1,585 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
|
||||
|
||||
// Copyright (c) 2009 Helge Bahmann
|
||||
// Copyright (c) 2012 - 2014, 2017 Andrey Semashev
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32_WCE)
|
||||
|
||||
#if _WIN32_WCE >= 0x600
|
||||
|
||||
extern "C" long __cdecl _InterlockedCompareExchange(long volatile*, long, long);
|
||||
extern "C" long __cdecl _InterlockedExchangeAdd(long volatile*, long);
|
||||
extern "C" long __cdecl _InterlockedExchange(long volatile*, long);
|
||||
extern "C" long __cdecl _InterlockedIncrement(long volatile*);
|
||||
extern "C" long __cdecl _InterlockedDecrement(long volatile*);
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) _InterlockedIncrement((long*)(dest))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) _InterlockedDecrement((long*)(dest))
|
||||
|
||||
#else // _WIN32_WCE >= 0x600
|
||||
|
||||
extern "C" long __cdecl InterlockedCompareExchange(long*, long, long);
|
||||
extern "C" long __cdecl InterlockedExchangeAdd(long*, long);
|
||||
extern "C" long __cdecl InterlockedExchange(long*, long);
|
||||
extern "C" long __cdecl InterlockedIncrement(long*);
|
||||
extern "C" long __cdecl InterlockedDecrement(long*);
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), exchange, compare)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) InterlockedIncrement((long*)(dest))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) InterlockedDecrement((long*)(dest))
|
||||
|
||||
#endif // _WIN32_WCE >= 0x600
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) \
|
||||
((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))
|
||||
|
||||
#elif defined(_MSC_VER) && _MSC_VER >= 1310
|
||||
|
||||
#if _MSC_VER < 1400
|
||||
|
||||
extern "C" long __cdecl _InterlockedCompareExchange(long volatile*, long, long);
|
||||
extern "C" long __cdecl _InterlockedExchangeAdd(long volatile*, long);
|
||||
extern "C" long __cdecl _InterlockedExchange(long volatile*, long);
|
||||
extern "C" long __cdecl _InterlockedIncrement(long volatile*);
|
||||
extern "C" long __cdecl _InterlockedDecrement(long volatile*);
|
||||
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(_InterlockedCompareExchange)
|
||||
#pragma intrinsic(_InterlockedExchangeAdd)
|
||||
#pragma intrinsic(_InterlockedExchange)
|
||||
#pragma intrinsic(_InterlockedIncrement)
|
||||
#pragma intrinsic(_InterlockedDecrement)
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), exchange, compare)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) _InterlockedIncrement((long*)(dest))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) _InterlockedDecrement((long*)(dest))
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) \
|
||||
((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), (long)(exchange), (long)(compare)))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, exchange) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE((long*)(dest), (long)(exchange)))
|
||||
|
||||
#else // _MSC_VER < 1400
|
||||
|
||||
#include <intrin.h>
|
||||
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(_InterlockedCompareExchange)
|
||||
#pragma intrinsic(_InterlockedExchangeAdd)
|
||||
#pragma intrinsic(_InterlockedExchange)
|
||||
#pragma intrinsic(_InterlockedIncrement)
|
||||
#pragma intrinsic(_InterlockedDecrement)
|
||||
#pragma intrinsic(_InterlockedAnd)
|
||||
#pragma intrinsic(_InterlockedOr)
|
||||
#pragma intrinsic(_InterlockedXor)
|
||||
#pragma intrinsic(_interlockedbittestandset)
|
||||
#pragma intrinsic(_interlockedbittestandreset)
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) _InterlockedIncrement((long*)(dest))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) _InterlockedDecrement((long*)(dest))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_BTS(dest, arg) _interlockedbittestandset((long*)(dest), (long)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_BTR(dest, arg) _interlockedbittestandreset((long*)(dest), (long)(arg))
|
||||
|
||||
#if defined(_M_AMD64) && !defined(_M_ARM64EC)
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(_interlockedbittestandset64)
|
||||
#pragma intrinsic(_interlockedbittestandreset64)
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_BTS64(dest, arg) _interlockedbittestandset64((__int64*)(dest), (__int64)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_BTR64(dest, arg) _interlockedbittestandreset64((__int64*)(dest), (__int64)(arg))
|
||||
#endif // defined(_M_AMD64) && !defined(_M_ARM64EC)
|
||||
|
||||
#if (defined(_M_IX86) && _M_IX86 >= 500) || (defined(_M_AMD64) && !defined(_M_ARM64EC)) || defined(_M_IA64)
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(_InterlockedCompareExchange64)
|
||||
#endif
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
|
||||
#endif
|
||||
|
||||
#if _MSC_VER >= 1500 && (defined(_M_AMD64) && !defined(_M_ARM64EC))
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(_InterlockedCompareExchange128)
|
||||
#endif
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(dest, exchange, compare) \
|
||||
_InterlockedCompareExchange128((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))
|
||||
#endif
|
||||
|
||||
#if _MSC_VER >= 1600
|
||||
|
||||
// MSVC 2010 and later provide intrinsics for 8 and 16 bit integers.
|
||||
// Note that for each bit count these macros must be either all defined or all not defined.
|
||||
// Otherwise atomic<> operations will be implemented inconsistently.
|
||||
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(_InterlockedCompareExchange8)
|
||||
#pragma intrinsic(_InterlockedExchangeAdd8)
|
||||
#pragma intrinsic(_InterlockedExchange8)
|
||||
#pragma intrinsic(_InterlockedAnd8)
|
||||
#pragma intrinsic(_InterlockedOr8)
|
||||
#pragma intrinsic(_InterlockedXor8)
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(dest, exchange, compare) _InterlockedCompareExchange8((char*)(dest), (char)(exchange), (char)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) _InterlockedExchangeAdd8((char*)(dest), (char)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) _InterlockedExchange8((char*)(dest), (char)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_AND8(dest, arg) _InterlockedAnd8((char*)(dest), (char)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_OR8(dest, arg) _InterlockedOr8((char*)(dest), (char)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_XOR8(dest, arg) _InterlockedXor8((char*)(dest), (char)(arg))
|
||||
|
||||
#if defined(BOOST_MSVC)
|
||||
#pragma intrinsic(_InterlockedCompareExchange16)
|
||||
#pragma intrinsic(_InterlockedExchangeAdd16)
|
||||
#pragma intrinsic(_InterlockedExchange16)
|
||||
#pragma intrinsic(_InterlockedAnd16)
|
||||
#pragma intrinsic(_InterlockedOr16)
|
||||
#pragma intrinsic(_InterlockedXor16)
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(dest, exchange, compare) _InterlockedCompareExchange16((short*)(dest), (short)(exchange), (short)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(dest, addend) _InterlockedExchangeAdd16((short*)(dest), (short)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) _InterlockedExchange16((short*)(dest), (short)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_AND16(dest, arg) _InterlockedAnd16((short*)(dest), (short)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_OR16(dest, arg) _InterlockedOr16((short*)(dest), (short)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_XOR16(dest, arg) _InterlockedXor16((short*)(dest), (short)(arg))
|
||||
|
||||
#endif // _MSC_VER >= 1600
|
||||
|
||||
// 64-bit and pointer-sized Interlocked operations on 64-bit x86/IA64 targets.
// ARM64EC is excluded here: it is handled by the ARM branch below.
#if (defined(_M_AMD64) && !defined(_M_ARM64EC)) || defined(_M_IA64)

#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedExchangeAdd64)
#pragma intrinsic(_InterlockedExchange64)
#pragma intrinsic(_InterlockedAnd64)
#pragma intrinsic(_InterlockedOr64)
#pragma intrinsic(_InterlockedXor64)
#endif

#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))

#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedCompareExchangePointer)
#pragma intrinsic(_InterlockedExchangePointer)
#endif

#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
// Cast to __int64* to match the pointer width on these 64-bit targets. (The
// previous (long*) cast was harmless only because EXCHANGE_ADD64 re-casts its
// argument to __int64*, but it was misleading.)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((__int64*)(dest), byte_offset))

#elif defined(_M_IX86)

// 32-bit x86: pointers are 32-bit, so route pointer operations through the
// long-sized Interlocked operations.
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval)))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))

#endif
|
||||
|
||||
// ARM/ARM64 targets (VS 2012+) provide memory-ordering variants of the
// Interlocked intrinsics: the _nf ("no fence"), _acq and _rel suffixes back the
// RELAXED, ACQUIRE and RELEASE macros defined below.
#if _MSC_VER >= 1700 && (defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC))

#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedExchangeAdd64)
#pragma intrinsic(_InterlockedExchange64)
#pragma intrinsic(_InterlockedAnd64)
#pragma intrinsic(_InterlockedOr64)
#pragma intrinsic(_InterlockedXor64)
#endif

#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))

// Compare-and-swap with explicit memory ordering.
#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedCompareExchange8_nf)
#pragma intrinsic(_InterlockedCompareExchange8_acq)
#pragma intrinsic(_InterlockedCompareExchange8_rel)
#pragma intrinsic(_InterlockedCompareExchange16_nf)
#pragma intrinsic(_InterlockedCompareExchange16_acq)
#pragma intrinsic(_InterlockedCompareExchange16_rel)
#pragma intrinsic(_InterlockedCompareExchange_nf)
#pragma intrinsic(_InterlockedCompareExchange_acq)
#pragma intrinsic(_InterlockedCompareExchange_rel)
#pragma intrinsic(_InterlockedCompareExchange64)
#pragma intrinsic(_InterlockedCompareExchange64_nf)
#pragma intrinsic(_InterlockedCompareExchange64_acq)
#pragma intrinsic(_InterlockedCompareExchange64_rel)
#if _MSC_VER >= 1900 && (defined(_M_ARM64) || defined(_M_ARM64EC))
#pragma intrinsic(_InterlockedCompareExchange128)
#pragma intrinsic(_InterlockedCompareExchange128_nf)
#pragma intrinsic(_InterlockedCompareExchange128_acq)
#pragma intrinsic(_InterlockedCompareExchange128_rel)
#endif
#pragma intrinsic(_InterlockedCompareExchangePointer)
#pragma intrinsic(_InterlockedCompareExchangePointer_nf)
#pragma intrinsic(_InterlockedCompareExchangePointer_acq)
#pragma intrinsic(_InterlockedCompareExchangePointer_rel)
#endif

#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(dest, exchange, compare) \
    _InterlockedCompareExchange8_nf((char*)(dest), (char)(exchange), (char)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(dest, exchange, compare) \
    _InterlockedCompareExchange8_acq((char*)(dest), (char)(exchange), (char)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(dest, exchange, compare) \
    _InterlockedCompareExchange8_rel((char*)(dest), (char)(exchange), (char)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(dest, exchange, compare) \
    _InterlockedCompareExchange16_nf((short*)(dest), (short)(exchange), (short)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(dest, exchange, compare) \
    _InterlockedCompareExchange16_acq((short*)(dest), (short)(exchange), (short)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(dest, exchange, compare) \
    _InterlockedCompareExchange16_rel((short*)(dest), (short)(exchange), (short)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(dest, exchange, compare) \
    _InterlockedCompareExchange_nf((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(dest, exchange, compare) \
    _InterlockedCompareExchange_acq((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(dest, exchange, compare) \
    _InterlockedCompareExchange_rel((long*)(dest), (long)(exchange), (long)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) \
    _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(dest, exchange, compare) \
    _InterlockedCompareExchange64_nf((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(dest, exchange, compare) \
    _InterlockedCompareExchange64_acq((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(dest, exchange, compare) \
    _InterlockedCompareExchange64_rel((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
#if _MSC_VER >= 1900 && (defined(_M_ARM64) || defined(_M_ARM64EC))
// 128-bit CAS: exchange is passed as two 64-bit halves (high first), compare is
// an in/out 128-bit buffer.
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(dest, exchange, compare) \
    _InterlockedCompareExchange128((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128_RELAXED(dest, exchange, compare) \
    _InterlockedCompareExchange128_nf((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128_ACQUIRE(dest, exchange, compare) \
    _InterlockedCompareExchange128_acq((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128_RELEASE(dest, exchange, compare) \
    _InterlockedCompareExchange128_rel((__int64*)(dest), ((const __int64*)(&exchange))[1], ((const __int64*)(&exchange))[0], (__int64*)(compare))
#endif
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) \
    _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELAXED(dest, exchange, compare) \
    _InterlockedCompareExchangePointer_nf((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_ACQUIRE(dest, exchange, compare) \
    _InterlockedCompareExchangePointer_acq((void**)(dest), (void*)(exchange), (void*)(compare))
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER_RELEASE(dest, exchange, compare) \
    _InterlockedCompareExchangePointer_rel((void**)(dest), (void*)(exchange), (void*)(compare))

// Fetch-add with explicit memory ordering.
#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedExchangeAdd8_nf)
#pragma intrinsic(_InterlockedExchangeAdd8_acq)
#pragma intrinsic(_InterlockedExchangeAdd8_rel)
#pragma intrinsic(_InterlockedExchangeAdd16_nf)
#pragma intrinsic(_InterlockedExchangeAdd16_acq)
#pragma intrinsic(_InterlockedExchangeAdd16_rel)
#pragma intrinsic(_InterlockedExchangeAdd_nf)
#pragma intrinsic(_InterlockedExchangeAdd_acq)
#pragma intrinsic(_InterlockedExchangeAdd_rel)
#pragma intrinsic(_InterlockedExchangeAdd64_nf)
#pragma intrinsic(_InterlockedExchangeAdd64_acq)
#pragma intrinsic(_InterlockedExchangeAdd64_rel)
#endif

#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(dest, addend) _InterlockedExchangeAdd8_nf((char*)(dest), (char)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(dest, addend) _InterlockedExchangeAdd8_acq((char*)(dest), (char)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(dest, addend) _InterlockedExchangeAdd8_rel((char*)(dest), (char)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(dest, addend) _InterlockedExchangeAdd16_nf((short*)(dest), (short)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(dest, addend) _InterlockedExchangeAdd16_acq((short*)(dest), (short)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(dest, addend) _InterlockedExchangeAdd16_rel((short*)(dest), (short)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(dest, addend) _InterlockedExchangeAdd_nf((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(dest, addend) _InterlockedExchangeAdd_acq((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(dest, addend) _InterlockedExchangeAdd_rel((long*)(dest), (long)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(dest, addend) _InterlockedExchangeAdd64_nf((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(dest, addend) _InterlockedExchangeAdd64_acq((__int64*)(dest), (__int64)(addend))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(dest, addend) _InterlockedExchangeAdd64_rel((__int64*)(dest), (__int64)(addend))

// Pointer-sized fetch-add: 64-bit on AArch64, long-sized on 32-bit ARM.
#if defined(_M_ARM64) || defined(_M_ARM64EC)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((__int64*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELAXED(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED((__int64*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_ACQUIRE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE((__int64*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELEASE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE((__int64*)(dest), byte_offset))
#else
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELAXED(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED((long*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_ACQUIRE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE((long*)(dest), byte_offset))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER_RELEASE(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE((long*)(dest), byte_offset))
#endif

// Exchange with explicit memory ordering. The _rel intrinsics only exist since
// VS 2013 (_MSC_VER >= 1800); see the fallbacks below.
#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedExchange8_nf)
#pragma intrinsic(_InterlockedExchange8_acq)
#pragma intrinsic(_InterlockedExchange16_nf)
#pragma intrinsic(_InterlockedExchange16_acq)
#pragma intrinsic(_InterlockedExchange_nf)
#pragma intrinsic(_InterlockedExchange_acq)
#pragma intrinsic(_InterlockedExchange64_nf)
#pragma intrinsic(_InterlockedExchange64_acq)
#pragma intrinsic(_InterlockedExchangePointer)
#pragma intrinsic(_InterlockedExchangePointer_nf)
#pragma intrinsic(_InterlockedExchangePointer_acq)
#if _MSC_VER >= 1800
#pragma intrinsic(_InterlockedExchange8_rel)
#pragma intrinsic(_InterlockedExchange16_rel)
#pragma intrinsic(_InterlockedExchange_rel)
#pragma intrinsic(_InterlockedExchange64_rel)
#pragma intrinsic(_InterlockedExchangePointer_rel)
#endif
#endif

#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(dest, newval) _InterlockedExchange8_nf((char*)(dest), (char)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(dest, newval) _InterlockedExchange8_acq((char*)(dest), (char)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(dest, newval) _InterlockedExchange16_nf((short*)(dest), (short)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(dest, newval) _InterlockedExchange16_acq((short*)(dest), (short)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(dest, newval) _InterlockedExchange_nf((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(dest, newval) _InterlockedExchange_acq((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(dest, newval) _InterlockedExchange64_nf((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(dest, newval) _InterlockedExchange64_acq((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELAXED(dest, newval) _InterlockedExchangePointer_nf((void**)(dest), (void*)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_ACQUIRE(dest, newval) _InterlockedExchangePointer_acq((void**)(dest), (void*)(newval))

#if _MSC_VER >= 1800
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) _InterlockedExchange8_rel((char*)(dest), (char)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) _InterlockedExchange16_rel((short*)(dest), (short)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) _InterlockedExchange_rel((long*)(dest), (long)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) _InterlockedExchange64_rel((__int64*)(dest), (__int64)(newval))
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) _InterlockedExchangePointer_rel((void**)(dest), (void*)(newval))
#else
// No _rel exchange intrinsics before VS 2013: fall back to the stronger,
// fully-fenced forms.
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval)
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER_RELEASE(dest, newval) BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval)
#endif

// Fetch-and with explicit memory ordering.
#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedAnd8_nf)
#pragma intrinsic(_InterlockedAnd8_acq)
#pragma intrinsic(_InterlockedAnd8_rel)
#pragma intrinsic(_InterlockedAnd16_nf)
#pragma intrinsic(_InterlockedAnd16_acq)
#pragma intrinsic(_InterlockedAnd16_rel)
#pragma intrinsic(_InterlockedAnd_nf)
#pragma intrinsic(_InterlockedAnd_acq)
#pragma intrinsic(_InterlockedAnd_rel)
#pragma intrinsic(_InterlockedAnd64_nf)
#pragma intrinsic(_InterlockedAnd64_acq)
#pragma intrinsic(_InterlockedAnd64_rel)
#endif

#define BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(dest, arg) _InterlockedAnd8_nf((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(dest, arg) _InterlockedAnd8_acq((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(dest, arg) _InterlockedAnd8_rel((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(dest, arg) _InterlockedAnd16_nf((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(dest, arg) _InterlockedAnd16_acq((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(dest, arg) _InterlockedAnd16_rel((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(dest, arg) _InterlockedAnd_nf((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(dest, arg) _InterlockedAnd_acq((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(dest, arg) _InterlockedAnd_rel((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(dest, arg) _InterlockedAnd64_nf((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(dest, arg) _InterlockedAnd64_acq((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(dest, arg) _InterlockedAnd64_rel((__int64*)(dest), (__int64)(arg))

// Fetch-or with explicit memory ordering.
#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedOr8_nf)
#pragma intrinsic(_InterlockedOr8_acq)
#pragma intrinsic(_InterlockedOr8_rel)
#pragma intrinsic(_InterlockedOr16_nf)
#pragma intrinsic(_InterlockedOr16_acq)
#pragma intrinsic(_InterlockedOr16_rel)
#pragma intrinsic(_InterlockedOr_nf)
#pragma intrinsic(_InterlockedOr_acq)
#pragma intrinsic(_InterlockedOr_rel)
#pragma intrinsic(_InterlockedOr64_nf)
#pragma intrinsic(_InterlockedOr64_acq)
#pragma intrinsic(_InterlockedOr64_rel)
#endif

#define BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(dest, arg) _InterlockedOr8_nf((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(dest, arg) _InterlockedOr8_acq((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(dest, arg) _InterlockedOr8_rel((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(dest, arg) _InterlockedOr16_nf((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(dest, arg) _InterlockedOr16_acq((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(dest, arg) _InterlockedOr16_rel((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(dest, arg) _InterlockedOr_nf((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(dest, arg) _InterlockedOr_acq((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(dest, arg) _InterlockedOr_rel((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(dest, arg) _InterlockedOr64_nf((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(dest, arg) _InterlockedOr64_acq((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(dest, arg) _InterlockedOr64_rel((__int64*)(dest), (__int64)(arg))

// Fetch-xor with explicit memory ordering.
#if defined(BOOST_MSVC)
#pragma intrinsic(_InterlockedXor8_nf)
#pragma intrinsic(_InterlockedXor8_acq)
#pragma intrinsic(_InterlockedXor8_rel)
#pragma intrinsic(_InterlockedXor16_nf)
#pragma intrinsic(_InterlockedXor16_acq)
#pragma intrinsic(_InterlockedXor16_rel)
#pragma intrinsic(_InterlockedXor_nf)
#pragma intrinsic(_InterlockedXor_acq)
#pragma intrinsic(_InterlockedXor_rel)
#pragma intrinsic(_InterlockedXor64_nf)
#pragma intrinsic(_InterlockedXor64_acq)
#pragma intrinsic(_InterlockedXor64_rel)
#endif

#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(dest, arg) _InterlockedXor8_nf((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(dest, arg) _InterlockedXor8_acq((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(dest, arg) _InterlockedXor8_rel((char*)(dest), (char)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(dest, arg) _InterlockedXor16_nf((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(dest, arg) _InterlockedXor16_acq((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(dest, arg) _InterlockedXor16_rel((short*)(dest), (short)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(dest, arg) _InterlockedXor_nf((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(dest, arg) _InterlockedXor_acq((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(dest, arg) _InterlockedXor_rel((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(dest, arg) _InterlockedXor64_nf((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(dest, arg) _InterlockedXor64_acq((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(dest, arg) _InterlockedXor64_rel((__int64*)(dest), (__int64)(arg))

// Bit test-and-set with explicit memory ordering.
#if defined(BOOST_MSVC)
#pragma intrinsic(_interlockedbittestandset_nf)
#pragma intrinsic(_interlockedbittestandset_acq)
#pragma intrinsic(_interlockedbittestandset_rel)
#endif

#define BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED(dest, arg) _interlockedbittestandset_nf((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE(dest, arg) _interlockedbittestandset_acq((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE(dest, arg) _interlockedbittestandset_rel((long*)(dest), (long)(arg))

// Bit test-and-reset with explicit memory ordering.
#if defined(BOOST_MSVC)
#pragma intrinsic(_interlockedbittestandreset_nf)
#pragma intrinsic(_interlockedbittestandreset_acq)
#pragma intrinsic(_interlockedbittestandreset_rel)
#endif

#define BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED(dest, arg) _interlockedbittestandreset_nf((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE(dest, arg) _interlockedbittestandreset_acq((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE(dest, arg) _interlockedbittestandreset_rel((long*)(dest), (long)(arg))

#endif // _MSC_VER >= 1700 && defined(_M_ARM)
|
||||
|
||||
#endif // _MSC_VER < 1400
|
||||
|
||||
#else // defined(_MSC_VER) && _MSC_VER >= 1310
|
||||
|
||||
#if defined(BOOST_USE_WINDOWS_H)
|
||||
|
||||
#include <windows.h>
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) InterlockedIncrement((long*)(dest))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) InterlockedDecrement((long*)(dest))
|
||||
|
||||
#if defined(_WIN64)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) InterlockedExchange64((__int64*)(dest), (__int64)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) InterlockedExchangePointer((void**)(dest), (void*)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
|
||||
|
||||
#else // defined(_WIN64)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
|
||||
|
||||
#endif // defined(_WIN64)
|
||||
|
||||
#else // defined(BOOST_USE_WINDOWS_H)
|
||||
|
||||
#if defined(__MINGW64__)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_IMPORT
|
||||
#else
|
||||
#define BOOST_ATOMIC_INTERLOCKED_IMPORT __declspec(dllimport)
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
extern "C" {
|
||||
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedIncrement(long volatile*, long);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedDecrement(long volatile*, long);
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) \
|
||||
boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_INCREMENT(dest) boost::atomics::detail::InterlockedIncrement((long*)(dest))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_DECREMENT(dest) boost::atomics::detail::InterlockedDecrement((long*)(dest))
|
||||
|
||||
#if defined(_WIN64)
|
||||
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64);
|
||||
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile*, void*, void*);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile*, void*);
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) \
|
||||
boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) boost::atomics::detail::InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) \
|
||||
boost::atomics::detail::InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) boost::atomics::detail::InterlockedExchangePointer((void**)(dest), (void*)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
|
||||
|
||||
#else // defined(_WIN64)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
|
||||
|
||||
#endif // defined(_WIN64)
|
||||
|
||||
} // extern "C"
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#undef BOOST_ATOMIC_INTERLOCKED_IMPORT
|
||||
|
||||
#endif // defined(BOOST_USE_WINDOWS_H)
|
||||
|
||||
#endif // defined(_MSC_VER)
|
||||
|
||||
#endif
|
||||
46
include/boost/atomic/detail/intptr.hpp
Normal file
46
include/boost/atomic/detail/intptr.hpp
Normal file
@@ -0,0 +1,46 @@
|
||||
/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2020-2025 Andrey Semashev
 */
/*!
 * \file atomic/detail/intptr.hpp
 *
 * This header defines (u)intptr_t types.
 */

#ifndef BOOST_ATOMIC_DETAIL_INTPTR_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_INTPTR_HPP_INCLUDED_

#include <cstdint>
#if !defined(UINTPTR_MAX)
#include <cstddef>
#endif
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

// Prefer the standard pointer-sized integer types when <cstdint> provides
// them; UINTPTR_MAX is used for detection since (u)intptr_t are optional.
// Otherwise fall back to size_t/ptrdiff_t, which are presumed pointer-sized
// on the platforms taking this branch.
#if defined(UINTPTR_MAX)
using std::uintptr_t;
using std::intptr_t;
#else
using uintptr_t = std::size_t;
using intptr_t = std::ptrdiff_t;
#endif

} // namespace detail
} // namespace atomics
} // namespace boost

#include <boost/atomic/detail/footer.hpp>

#endif // BOOST_ATOMIC_DETAIL_INTPTR_HPP_INCLUDED_
|
||||
58
include/boost/atomic/detail/link.hpp
Normal file
58
include/boost/atomic/detail/link.hpp
Normal file
@@ -0,0 +1,58 @@
|
||||
/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2012 Hartmut Kaiser
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file atomic/detail/link.hpp
 *
 * This header defines macros for linking with compiled library of Boost.Atomic
 */

#ifndef BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_LINK_HPP_INCLUDED_

#include <boost/atomic/detail/config.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

///////////////////////////////////////////////////////////////////////////////
// Set up dll import/export options
#if (defined(BOOST_ATOMIC_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) && \
    !defined(BOOST_ATOMIC_STATIC_LINK)

#if defined(BOOST_ATOMIC_SOURCE)
#define BOOST_ATOMIC_DECL BOOST_SYMBOL_EXPORT
#define BOOST_ATOMIC_BUILD_DLL
#else
#define BOOST_ATOMIC_DECL BOOST_SYMBOL_IMPORT
#endif

#endif // building a shared library

// Static linking (or building the library itself): no decoration.
#ifndef BOOST_ATOMIC_DECL
#define BOOST_ATOMIC_DECL
#endif

///////////////////////////////////////////////////////////////////////////////
// Auto library naming
#if !defined(BOOST_ATOMIC_SOURCE) && !defined(BOOST_ALL_NO_LIB) && \
    !defined(BOOST_ATOMIC_NO_LIB)

#define BOOST_LIB_NAME boost_atomic

// tell the auto-link code to select a dll when required:
#if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_ATOMIC_DYN_LINK)
#define BOOST_DYN_LINK
#endif

#include <boost/config/auto_link.hpp>

#endif // auto-linking disabled

#endif
|
||||
171
include/boost/atomic/detail/lock_pool.hpp
Normal file
171
include/boost/atomic/detail/lock_pool.hpp
Normal file
@@ -0,0 +1,171 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2011 Helge Bahmann
|
||||
* Copyright (c) 2013-2014, 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/lock_pool.hpp
|
||||
*
|
||||
* This header contains declaration of the lock pool used to emulate atomic ops.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
|
||||
|
||||
#include <cstddef>
|
||||
#include <chrono>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/link.hpp>
|
||||
#include <boost/atomic/detail/intptr.hpp>
|
||||
#if defined(BOOST_WINDOWS)
|
||||
#include <boost/winapi/thread.hpp>
|
||||
#else // defined(BOOST_WINDOWS)
|
||||
#include <time.h>
|
||||
#if !defined(BOOST_HAS_NANOSLEEP)
|
||||
#include <unistd.h>
|
||||
#endif // !defined(BOOST_HAS_NANOSLEEP)
|
||||
#endif // defined(BOOST_WINDOWS)
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
BOOST_FORCEINLINE void wait_some() noexcept
|
||||
{
|
||||
#if defined(BOOST_WINDOWS)
|
||||
boost::winapi::SwitchToThread();
|
||||
#elif defined(BOOST_HAS_NANOSLEEP)
|
||||
// Do not use sched_yield or pthread_yield as at least on Linux it doesn't block the thread if there are no other
|
||||
// pending threads on the current CPU. Proper sleeping is guaranteed to block the thread, which allows other threads
|
||||
// to potentially migrate to this CPU and complete the tasks we're waiting for.
|
||||
timespec ts{};
|
||||
ts.tv_sec = 0;
|
||||
ts.tv_nsec = 1000;
|
||||
nanosleep(&ts, nullptr);
|
||||
#else
|
||||
usleep(1);
|
||||
#endif
|
||||
}
|
||||
|
||||
namespace lock_pool {
|
||||
|
||||
BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) noexcept;
|
||||
BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) noexcept;
|
||||
BOOST_ATOMIC_DECL void unlock(void* vls) noexcept;
|
||||
|
||||
BOOST_ATOMIC_DECL void* allocate_wait_state(void* vls, const volatile void* addr) noexcept;
|
||||
BOOST_ATOMIC_DECL void free_wait_state(void* vls, void* vws) noexcept;
|
||||
BOOST_ATOMIC_DECL void wait(void* vls, void* vws) noexcept;
|
||||
#if !defined(BOOST_WINDOWS)
|
||||
BOOST_ATOMIC_DECL bool wait_until(void* vls, void* vws, clockid_t clock_id, timespec const& abs_timeout) noexcept;
|
||||
#endif // !defined(BOOST_WINDOWS)
|
||||
BOOST_ATOMIC_DECL bool wait_for(void* vls, void* vws, std::chrono::nanoseconds rel_timeout) noexcept;
|
||||
BOOST_ATOMIC_DECL void notify_one(void* vls, const volatile void* addr) noexcept;
|
||||
BOOST_ATOMIC_DECL void notify_all(void* vls, const volatile void* addr) noexcept;
|
||||
|
||||
BOOST_ATOMIC_DECL void thread_fence() noexcept;
|
||||
BOOST_ATOMIC_DECL void signal_fence() noexcept;
|
||||
|
||||
template< std::size_t Alignment >
|
||||
BOOST_FORCEINLINE atomics::detail::uintptr_t hash_ptr(const volatile void* addr) noexcept
|
||||
{
|
||||
atomics::detail::uintptr_t ptr = reinterpret_cast< atomics::detail::uintptr_t >(addr);
|
||||
atomics::detail::uintptr_t h = ptr / Alignment;
|
||||
|
||||
// Since many malloc/new implementations return pointers with higher alignment
|
||||
// than indicated by Alignment, it makes sense to mix higher bits
|
||||
// into the lower ones. On 64-bit platforms, malloc typically aligns to 16 bytes,
|
||||
// on 32-bit - to 8 bytes.
|
||||
constexpr std::size_t malloc_alignment = sizeof(void*) >= 8u ? 16u : 8u;
|
||||
BOOST_IF_CONSTEXPR (Alignment != malloc_alignment)
|
||||
h ^= ptr / malloc_alignment;
|
||||
|
||||
return h;
|
||||
}
|
||||
|
||||
template< std::size_t Alignment, bool LongLock = false >
|
||||
class scoped_lock
|
||||
{
|
||||
private:
|
||||
void* m_lock;
|
||||
|
||||
public:
|
||||
explicit scoped_lock(const volatile void* addr) noexcept
|
||||
{
|
||||
atomics::detail::uintptr_t h = lock_pool::hash_ptr< Alignment >(addr);
|
||||
BOOST_IF_CONSTEXPR (!LongLock)
|
||||
m_lock = lock_pool::short_lock(h);
|
||||
else
|
||||
m_lock = lock_pool::long_lock(h);
|
||||
}
|
||||
|
||||
scoped_lock(scoped_lock const&) = delete;
|
||||
scoped_lock& operator=(scoped_lock const&) = delete;
|
||||
|
||||
~scoped_lock() noexcept
|
||||
{
|
||||
lock_pool::unlock(m_lock);
|
||||
}
|
||||
|
||||
void* get_lock_state() const noexcept
|
||||
{
|
||||
return m_lock;
|
||||
}
|
||||
};
|
||||
|
||||
template< std::size_t Alignment >
|
||||
class scoped_wait_state :
|
||||
public scoped_lock< Alignment, true >
|
||||
{
|
||||
private:
|
||||
void* m_wait_state;
|
||||
|
||||
public:
|
||||
explicit scoped_wait_state(const volatile void* addr) noexcept :
|
||||
scoped_lock< Alignment, true >(addr)
|
||||
{
|
||||
m_wait_state = lock_pool::allocate_wait_state(this->get_lock_state(), addr);
|
||||
}
|
||||
|
||||
scoped_wait_state(scoped_wait_state const&) = delete;
|
||||
scoped_wait_state& operator=(scoped_wait_state const&) = delete;
|
||||
|
||||
~scoped_wait_state() noexcept
|
||||
{
|
||||
lock_pool::free_wait_state(this->get_lock_state(), m_wait_state);
|
||||
}
|
||||
|
||||
void wait() noexcept
|
||||
{
|
||||
lock_pool::wait(this->get_lock_state(), m_wait_state);
|
||||
}
|
||||
|
||||
#if !defined(BOOST_WINDOWS)
|
||||
bool wait_until(clockid_t clock_id, timespec const& abs_timeout) noexcept
|
||||
{
|
||||
return lock_pool::wait_until(this->get_lock_state(), m_wait_state, clock_id, abs_timeout);
|
||||
}
|
||||
#endif // !defined(BOOST_WINDOWS)
|
||||
|
||||
bool wait_for(std::chrono::nanoseconds rel_timeout) noexcept
|
||||
{
|
||||
return lock_pool::wait_for(this->get_lock_state(), m_wait_state, rel_timeout);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace lock_pool
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
|
||||
47
include/boost/atomic/detail/memory_order_utils.hpp
Normal file
47
include/boost/atomic/detail/memory_order_utils.hpp
Normal file
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/memory_order_utils.hpp
|
||||
*
|
||||
* This header contains utilities related to memory order constants.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_MEMORY_ORDER_UTILS_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_MEMORY_ORDER_UTILS_HPP_INCLUDED_
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
BOOST_FORCEINLINE constexpr memory_order deduce_failure_order(memory_order order) noexcept
|
||||
{
|
||||
return order == memory_order_acq_rel ? memory_order_acquire : (order == memory_order_release ? memory_order_relaxed : order);
|
||||
}
|
||||
|
||||
BOOST_FORCEINLINE constexpr bool cas_failure_order_must_not_be_stronger_than_success_order(memory_order success_order, memory_order failure_order) noexcept
|
||||
{
|
||||
// 15 == (memory_order_seq_cst | memory_order_consume), see memory_order.hpp
|
||||
// Given the enum values we can test the strength of memory order requirements with this single condition.
|
||||
return (static_cast< unsigned int >(failure_order) & 15u) <= (static_cast< unsigned int >(success_order) & 15u);
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_MEMORY_ORDER_UTILS_HPP_INCLUDED_
|
||||
43
include/boost/atomic/detail/once_flag.hpp
Normal file
43
include/boost/atomic/detail/once_flag.hpp
Normal file
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020-2025 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/once_flag.hpp
|
||||
*
|
||||
* This header declares \c once_flag structure for controlling one time initialization
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_ONCE_FLAG_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_ONCE_FLAG_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/aligned_variable.hpp>
|
||||
#include <boost/atomic/detail/core_operations.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
using once_flag_operations = atomics::detail::core_operations< 1u, false, false >;
|
||||
|
||||
struct once_flag
|
||||
{
|
||||
BOOST_ATOMIC_DETAIL_ALIGNED_VAR(once_flag_operations::storage_alignment, once_flag_operations::storage_type, m_flag);
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_ONCE_FLAG_HPP_INCLUDED_
|
||||
53
include/boost/atomic/detail/ops_gcc_aarch32_common.hpp
Normal file
53
include/boost/atomic/detail/ops_gcc_aarch32_common.hpp
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/ops_gcc_aarch32_common.hpp
|
||||
*
|
||||
* This header contains basic utilities for gcc AArch32 backend.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH32_COMMON_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH32_COMMON_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_MO_SWITCH(mo)\
|
||||
switch (mo)\
|
||||
{\
|
||||
case memory_order_relaxed:\
|
||||
BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN("r", "r")\
|
||||
break;\
|
||||
\
|
||||
case memory_order_consume:\
|
||||
case memory_order_acquire:\
|
||||
BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN("a", "r")\
|
||||
break;\
|
||||
\
|
||||
case memory_order_release:\
|
||||
BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN("r", "l")\
|
||||
break;\
|
||||
\
|
||||
default:\
|
||||
BOOST_ATOMIC_DETAIL_AARCH32_MO_INSN("a", "l")\
|
||||
break;\
|
||||
}
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_AARCH32_LITTLE_ENDIAN)
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(arg) "%" BOOST_STRINGIZE(arg)
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(arg) "%H" BOOST_STRINGIZE(arg)
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_LO(arg) "%H" BOOST_STRINGIZE(arg)
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH32_ASM_ARG_HI(arg) "%" BOOST_STRINGIZE(arg)
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH32_COMMON_HPP_INCLUDED_
|
||||
53
include/boost/atomic/detail/ops_gcc_aarch64_common.hpp
Normal file
53
include/boost/atomic/detail/ops_gcc_aarch64_common.hpp
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2020 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/ops_gcc_aarch64_common.hpp
|
||||
*
|
||||
* This header contains basic utilities for gcc AArch64 backend.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH64_COMMON_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH64_COMMON_HPP_INCLUDED_
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/capabilities.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_MO_SWITCH(mo)\
|
||||
switch (mo)\
|
||||
{\
|
||||
case memory_order_relaxed:\
|
||||
BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN("", "")\
|
||||
break;\
|
||||
\
|
||||
case memory_order_consume:\
|
||||
case memory_order_acquire:\
|
||||
BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN("a", "")\
|
||||
break;\
|
||||
\
|
||||
case memory_order_release:\
|
||||
BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN("", "l")\
|
||||
break;\
|
||||
\
|
||||
default:\
|
||||
BOOST_ATOMIC_DETAIL_AARCH64_MO_INSN("a", "l")\
|
||||
break;\
|
||||
}
|
||||
|
||||
#if defined(BOOST_ATOMIC_DETAIL_AARCH64_LITTLE_ENDIAN)
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "0"
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "1"
|
||||
#else
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_LO "1"
|
||||
#define BOOST_ATOMIC_DETAIL_AARCH64_ASM_ARG_HI "0"
|
||||
#endif
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_AARCH64_COMMON_HPP_INCLUDED_
|
||||
63
include/boost/atomic/detail/ops_gcc_arm_common.hpp
Normal file
63
include/boost/atomic/detail/ops_gcc_arm_common.hpp
Normal file
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Distributed under the Boost Software License, Version 1.0.
|
||||
* (See accompanying file LICENSE_1_0.txt or copy at
|
||||
* http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
* Copyright (c) 2009 Helge Bahmann
|
||||
* Copyright (c) 2013 Tim Blechmann
|
||||
* Copyright (c) 2014 Andrey Semashev
|
||||
*/
|
||||
/*!
|
||||
* \file atomic/detail/ops_gcc_arm_common.hpp
|
||||
*
|
||||
* This header contains basic utilities for gcc ARM backend.
|
||||
*/
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
|
||||
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
|
||||
|
||||
#include <cstdint>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/fence_arch_operations.hpp>
|
||||
#include <boost/atomic/detail/header.hpp>
|
||||
|
||||
#ifdef BOOST_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
struct core_arch_operations_gcc_arm_base
|
||||
{
|
||||
static constexpr bool full_cas_based = false;
|
||||
static constexpr bool is_always_lock_free = true;
|
||||
|
||||
static BOOST_FORCEINLINE void fence_before(memory_order order) noexcept
|
||||
{
|
||||
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
|
||||
fence_arch_operations::hardware_full_fence();
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void fence_after(memory_order order) noexcept
|
||||
{
|
||||
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
|
||||
fence_arch_operations::hardware_full_fence();
|
||||
}
|
||||
|
||||
static BOOST_FORCEINLINE void fence_after_store(memory_order order) noexcept
|
||||
{
|
||||
if (order == memory_order_seq_cst)
|
||||
fence_arch_operations::hardware_full_fence();
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#include <boost/atomic/detail/footer.hpp>
|
||||
|
||||
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user