| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | #ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ |
| | #define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ |
| |
|
| | namespace Eigen { |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
// EventCount allows waiting for arbitrary predicates in non-blocking
// algorithms. Think of a condition variable, but the wait predicate does not
// need to be protected by a mutex. Usage:
//
// Waiting thread does:
//   if (predicate)
//     return act();
//   ec.Prewait();
//   if (predicate)
//     return ec.CancelWait();
//   ec.CommitWait(&w);
//
// Notifying thread does:
//   predicate = true;
//   ec.Notify(true);
//
// Notify is cheap if there are no waiting threads. Prewait/CommitWait are not
// cheap, but they are executed only if the preceding predicate check has
// failed.
//
// Algorithm outline:
// There are two main variables: the predicate (managed by the user) and
// state_. The operation closely resembles Dekker's mutual exclusion
// algorithm: the waiting thread modifies state_ then checks the predicate,
// while the notifying thread sets the predicate then checks state_. Due to
// the seq_cst ordering between these operations (seq_cst CAS in Prewait,
// seq_cst fence in Notify) it is guaranteed that either the waiter sees the
// predicate change and does not block, or the notifier sees the state_ change
// and unblocks the waiter, or both. It cannot happen that both threads miss
// each other's changes, which would lead to deadlock.
class EventCount {
 public:
  class Waiter;

  // waiters must outlive this EventCount; its size bounds the number of
  // threads that may wait concurrently (limited by the kWaiterBits field
  // width in state_).
  EventCount(MaxSizeVector<Waiter>& waiters)
      : state_(kStackMask), waiters_(waiters) {
    eigen_plain_assert(waiters.size() < (1 << kWaiterBits) - 1);
  }

  ~EventCount() {
    // Ensure there are no waiters: state_ must be back to its initial value
    // (empty stack, zero pre-wait count, zero signals).
    eigen_plain_assert(state_.load() == kStackMask);
  }

  // Prewait prepares for waiting by registering this thread in the pre-wait
  // counter. After calling Prewait, the thread must re-check the wait
  // predicate and then call either CancelWait or CommitWait.
  void Prewait() {
    uint64_t state = state_.load(std::memory_order_relaxed);
    for (;;) {
      CheckState(state);
      uint64_t newstate = state + kWaiterInc;
      CheckState(newstate);
      // seq_cst here pairs with the seq_cst fence in Notify (Dekker-style
      // handshake with the caller's subsequent predicate re-check).
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_seq_cst))
        return;
    }
  }

  // CommitWait commits waiting after Prewait: either consumes a pending
  // signal and returns, or pushes w onto the waiter stack and blocks.
  void CommitWait(Waiter* w) {
    eigen_plain_assert((w->epoch & ~kEpochMask) == 0);
    w->state = Waiter::kNotSignaled;
    // Stack node value: this waiter's index in waiters_ combined with its
    // current epoch (the ABA counter).
    const uint64_t me = (w - &waiters_[0]) | w->epoch;
    uint64_t state = state_.load(std::memory_order_seq_cst);
    for (;;) {
      CheckState(state, true);
      uint64_t newstate;
      if ((state & kSignalMask) != 0) {
        // A signal is pending: consume it (and our pre-wait slot) and return
        // without blocking.
        newstate = state - kWaiterInc - kSignalInc;
      } else {
        // Remove this thread from the pre-wait counter and push it onto the
        // waiter stack. w->next must be published before the CAS below makes
        // this node reachable.
        newstate = ((state & kWaiterMask) - kWaiterInc) | me;
        w->next.store(state & (kStackMask | kEpochMask),
                      std::memory_order_relaxed);
      }
      CheckState(newstate);
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_acq_rel)) {
        if ((state & kSignalMask) == 0) {
          // Bump the epoch for the next push (ABA protection), then block
          // until Unpark signals us.
          w->epoch += kEpochInc;
          Park(w);
        }
        return;
      }
    }
  }

  // CancelWait cancels the effects of the previous Prewait call.
  void CancelWait() {
    uint64_t state = state_.load(std::memory_order_relaxed);
    for (;;) {
      CheckState(state, true);
      uint64_t newstate = state - kWaiterInc;
      // We don't know if this thread was also notified or not, so we should
      // not consume a signal unconditionally. Only if the number of waiters
      // equals the number of signals do we know the signal was meant for a
      // pre-wait thread (us), and we must take it away.
      if (((state & kWaiterMask) >> kWaiterShift) ==
          ((state & kSignalMask) >> kSignalShift))
        newstate -= kSignalInc;
      CheckState(newstate);
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_acq_rel))
        return;
    }
  }

  // Notify wakes one (notifyAll == false) or all (notifyAll == true) waiting
  // threads. Must be called after changing the associated wait predicate.
  void Notify(bool notifyAll) {
    // Seq_cst fence: pairs with the seq_cst CAS in Prewait so that either we
    // see the waiter's state_ change or the waiter sees the predicate change.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    uint64_t state = state_.load(std::memory_order_acquire);
    for (;;) {
      CheckState(state);
      const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
      const uint64_t signals = (state & kSignalMask) >> kSignalShift;
      // Easy case: no committed waiters and no unsignaled pre-waiters.
      if ((state & kStackMask) == kStackMask && waiters == signals) return;
      uint64_t newstate;
      if (notifyAll) {
        // Empty the wait stack and set the signal count to the number of
        // pre-wait threads, so each of them consumes one in CommitWait.
        newstate =
            (state & kWaiterMask) | (waiters << kSignalShift) | kStackMask;
      } else if (signals < waiters) {
        // There is a thread in pre-wait state; unblock it with a signal.
        newstate = state + kSignalInc;
      } else {
        // Pop one waiter from the stack to unpark it.
        Waiter* w = &waiters_[state & kStackMask];
        uint64_t next = w->next.load(std::memory_order_relaxed);
        newstate = (state & (kWaiterMask | kSignalMask)) | next;
      }
      CheckState(newstate);
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_acq_rel)) {
        if (!notifyAll && (signals < waiters))
          return;  // unblocked a pre-wait thread via the signal count
        if ((state & kStackMask) == kStackMask) return;
        Waiter* w = &waiters_[state & kStackMask];
        // For notify-one, terminate the popped node's list so Unpark wakes
        // only this waiter; for notify-all, Unpark walks the whole chain.
        if (!notifyAll) w->next.store(kStackMask, std::memory_order_relaxed);
        Unpark(w);
        return;
      }
    }
  }

  // Per-thread wait record; elements of the waiters_ vector.
  class Waiter {
    friend class EventCount;
    // Align to 128-byte boundary to prevent false sharing with other Waiter
    // objects in the same vector.
    EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic<uint64_t> next;
    std::mutex mu;
    std::condition_variable cv;
    uint64_t epoch = 0;
    unsigned state = kNotSignaled;
    enum {
      kNotSignaled,
      kWaiting,
      kSignaled,
    };
  };

 private:
  // state_ layout:
  // - low kWaiterBits is a stack of waiters that committed wait
  //   (indexes into the waiters_ array are used as stack elements,
  //   kStackMask means empty stack);
  // - next kWaiterBits is the count of waiters in pre-wait state;
  // - next kWaiterBits is the count of pending signals;
  // - remaining bits are an ABA counter for the stack
  //   (stored in the Waiter node and incremented on push).
  static const uint64_t kWaiterBits = 14;
  static const uint64_t kStackMask = (1ull << kWaiterBits) - 1;
  static const uint64_t kWaiterShift = kWaiterBits;
  static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1)
                                      << kWaiterShift;
  static const uint64_t kWaiterInc = 1ull << kWaiterShift;
  static const uint64_t kSignalShift = 2 * kWaiterBits;
  static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1)
                                      << kSignalShift;
  static const uint64_t kSignalInc = 1ull << kSignalShift;
  static const uint64_t kEpochShift = 3 * kWaiterBits;
  static const uint64_t kEpochBits = 64 - kEpochShift;
  static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
  static const uint64_t kEpochInc = 1ull << kEpochShift;
  std::atomic<uint64_t> state_;
  MaxSizeVector<Waiter>& waiters_;

  // Debug-asserts the invariants of a state_ value; waiter == true
  // additionally requires at least one waiter to be accounted for.
  static void CheckState(uint64_t state, bool waiter = false) {
    static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
    const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
    const uint64_t signals = (state & kSignalMask) >> kSignalShift;
    eigen_plain_assert(waiters >= signals);
    eigen_plain_assert(waiters < (1 << kWaiterBits) - 1);
    eigen_plain_assert(!waiter || waiters > 0);
    (void)waiters;
    (void)signals;
  }

  // Blocks the calling thread on w's condition variable until Unpark marks
  // it kSignaled (loop guards against spurious wakeups).
  void Park(Waiter* w) {
    std::unique_lock<std::mutex> lock(w->mu);
    while (w->state != Waiter::kSignaled) {
      w->state = Waiter::kWaiting;
      w->cv.wait(lock);
    }
  }

  // Wakes w and every waiter chained after it via the next links
  // (a single node for notify-one, the whole popped stack for notify-all).
  void Unpark(Waiter* w) {
    for (Waiter* next; w; w = next) {
      uint64_t wnext = w->next.load(std::memory_order_relaxed) & kStackMask;
      next = wnext == kStackMask ? nullptr : &waiters_[wnext];
      unsigned state;
      {
        std::unique_lock<std::mutex> lock(w->mu);
        state = w->state;
        w->state = Waiter::kSignaled;
      }
      // Avoid notifying if it was not actually blocked in Park.
      if (state == Waiter::kWaiting) w->cv.notify_one();
    }
  }

  EventCount(const EventCount&) = delete;
  void operator=(const EventCount&) = delete;
};
| |
|
| | } |
| |
|
| | #endif |
| |
|