diff --git a/include/boost/fiber/detail/spinlock.hpp b/include/boost/fiber/detail/spinlock.hpp
index 61c82376..3be04768 100644
--- a/include/boost/fiber/detail/spinlock.hpp
+++ b/include/boost/fiber/detail/spinlock.hpp
@@ -20,12 +20,12 @@ namespace detail {
 
 class BOOST_FIBERS_DECL atomic_spinlock {
 private:
-    enum class atomic_spinlock_status {
+    enum class spinlock_status {
         locked = 0,
         unlocked
     };
 
-    std::atomic< atomic_spinlock_status > state_{ atomic_spinlock_status::unlocked };
+    std::atomic< spinlock_status > state_{ spinlock_status::unlocked };
 
 public:
     atomic_spinlock() noexcept = default;
diff --git a/src/detail/spinlock.cpp b/src/detail/spinlock.cpp
index 7a4ae20d..57a05c11 100644
--- a/src/detail/spinlock.cpp
+++ b/src/detail/spinlock.cpp
@@ -22,7 +22,7 @@ atomic_spinlock::lock() noexcept {
         // access to CPU's cache
         // first access to state_ -> cache miss
         // sucessive acccess to state_ -> cache hit
-        while ( atomic_spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
+        while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
             // busy-wait
             std::this_thread::yield();
         }
@@ -30,13 +30,13 @@ atomic_spinlock::lock() noexcept {
         // cached copies are invalidated -> cache miss
         // test-and-set signaled over the bus
     }
-    while ( atomic_spinlock_status::unlocked != state_.exchange( atomic_spinlock_status::locked, std::memory_order_acquire) );
+    while ( spinlock_status::unlocked != state_.exchange( spinlock_status::locked, std::memory_order_acquire) );
 }
 
 void
 atomic_spinlock::unlock() noexcept {
-    BOOST_ASSERT( atomic_spinlock_status::locked == state_);
-    state_.store( atomic_spinlock_status::unlocked, std::memory_order_release);
+    BOOST_ASSERT( spinlock_status::locked == state_);
+    state_.store( spinlock_status::unlocked, std::memory_order_release);
 }
 
 }}}
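For context, below is a minimal, self-contained sketch of the test-and-test-and-set pattern this patch renames: a relaxed load spins on the cached state, an exchange with acquire ordering takes the lock, and a release store publishes the critical section's writes on unlock. The enum and member names mirror the diff, but the class name, the assert macro, and the main() demo are illustrative assumptions, not the Boost.Fiber sources.

// Sketch only -- not the Boost.Fiber implementation.
#include <atomic>
#include <cassert>
#include <iostream>
#include <thread>
#include <vector>

class spinlock {
private:
    enum class spinlock_status {
        locked = 0,
        unlocked
    };

    std::atomic< spinlock_status > state_{ spinlock_status::unlocked };

public:
    void lock() noexcept {
        do {
            // Test loop: a relaxed load reads the locally cached copy of
            // state_ and avoids flooding the bus with read-modify-write ops.
            while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
                std::this_thread::yield();  // busy-wait politely
            }
            // Set step: exchange with acquire ordering; exactly one thread
            // observes the unlocked -> locked transition and wins the lock.
        } while ( spinlock_status::unlocked != state_.exchange( spinlock_status::locked, std::memory_order_acquire) );
    }

    void unlock() noexcept {
        assert( spinlock_status::locked == state_);
        // The release store pairs with the acquire exchange in lock(), so the
        // critical section's writes are visible to the next lock owner.
        state_.store( spinlock_status::unlocked, std::memory_order_release);
    }
};

int main() {
    spinlock lk;
    long counter = 0;

    // Hypothetical usage: several threads increment a shared counter under the lock.
    std::vector< std::thread > workers;
    for ( int t = 0; t < 4; ++t) {
        workers.emplace_back( [&lk, &counter]() {
            for ( int i = 0; i < 100000; ++i) {
                lk.lock();
                ++counter;
                lk.unlock();
            }
        });
    }
    for ( auto & w : workers) {
        w.join();
    }

    std::cout << "counter = " << counter << '\n';  // expect 400000
    return 0;
}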