| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | #include "thread/thread_pool.h" |
| |
|
| | #include <stdint.h> |
| |
|
| | #include <algorithm> |
| | #include <atomic> |
| | #include <cstddef> |
| | #include <memory> |
| | #include <mutex> |
| | #include <thread> |
| | #include <utility> |
| | #include <vector> |
| |
|
| | #include <mujoco/mjsan.h> |
| | #include <mujoco/mjthread.h> |
| | #include <mujoco/mujoco.h> |
| | #include "engine/engine_crossplatform.h" |
| | #include "engine/engine_util_errmem.h" |
| | #include "thread/thread_queue.h" |
| | #include "thread/thread_task.h" |
| |
|
| | namespace mujoco { |
namespace {
// Capacity of the lockless task queue shared by all worker threads.
constexpr size_t kThreadPoolQueueSize = 640;

// Per-thread worker index: 0 for threads outside the pool, i + 1 for the
// i-th pool worker (assigned in ThreadPoolWorker and RegisterWorker).
thread_local size_t worker_id = 0;

// Bookkeeping for a single worker thread of the pool.
struct WorkerThread {
  // No-op task function: enqueuing it unblocks a worker waiting on the
  // queue so the worker can observe the pool's shutdown flag and exit.
  static void* ShutdownFunction(void* args) {
    return nullptr;
  }

  // OS thread executing ThreadPoolWorker.
  std::unique_ptr<std::thread> thread_;

  // Dedicated shutdown task for this worker (one per worker so that each
  // worker pops exactly one during Shutdown).
  mjTask shutdown_task_ {
    &ShutdownFunction,
    nullptr,
    mjTASK_NEW
  };
};
}  // namespace
| |
|
| | |
| | |
| | class ThreadPoolImpl : public mjThreadPool { |
| | public: |
| | ThreadPoolImpl(int num_worker) : mjThreadPool{num_worker} { |
| | |
| | for (int i = 0; i < std::min(num_worker, mjMAXTHREAD); ++i) { |
| | WorkerThread worker{ |
| | std::make_unique<std::thread>(ThreadPoolWorker, this, i)}; |
| | workers_.push_back(std::move(worker)); |
| | } |
| | } |
| |
|
| | size_t NumberOfThreads() { |
| | return workers_.size(); |
| | } |
| |
|
| | |
| | void Enqueue(mjTask* task) { |
| | if (mjUNLIKELY(GetAtomicTaskStatus(task).exchange(mjTASK_QUEUED) != |
| | mjTASK_NEW)) { |
| | mjERROR("task->status is not mjTASK_NEW"); |
| | } |
| | lockless_queue_.push(task); |
| | } |
| |
|
| | |
| | void Shutdown() { |
| | if (shutdown_) { |
| | return; |
| | } |
| |
|
| | shutdown_ = true; |
| | std::vector<mjTask> shutdown_tasks(workers_.size()); |
| | for (auto& worker : workers_) { |
| | Enqueue(&worker.shutdown_task_); |
| | } |
| |
|
| | for (auto& worker : workers_) { |
| | worker.thread_->join(); |
| | } |
| | } |
| |
|
| | |
| | void RegisterWorker(const size_t input_worker_id) { |
| | worker_id = input_worker_id; |
| | } |
| |
|
| | |
| | size_t GetWorkerId() { |
| | return worker_id; |
| | } |
| |
|
| | void LockAlloc() { |
| | alloc_mutex_.lock(); |
| | } |
| |
|
| | void UnlockAlloc() { |
| | alloc_mutex_.unlock(); |
| | } |
| |
|
| | bool IsThreadPoolBound() { |
| | return thread_pool_bound_; |
| | } |
| |
|
| | void BindThreadPool() { |
| | thread_pool_bound_ = true; |
| | } |
| |
|
| | ~ThreadPoolImpl() { Shutdown(); } |
| |
|
| | private: |
| | |
| | static void ThreadPoolWorker( |
| | ThreadPoolImpl* thread_pool, const size_t thread_index) { |
| | worker_id = thread_index + 1; |
| | while (!thread_pool->shutdown_) { |
| | auto task = static_cast<mjTask*>(thread_pool->lockless_queue_.pop()); |
| | task->args = task->func(task->args); |
| | GetAtomicTaskStatus(task).store(mjTASK_COMPLETED); |
| | } |
| | } |
| |
|
| | |
| | std::atomic<bool> shutdown_ = false; |
| |
|
| | |
| | std::vector<WorkerThread> workers_; |
| |
|
| | |
| | mujoco::LocklessQueue<void*, kThreadPoolQueueSize> lockless_queue_; |
| |
|
| | |
| | std::mutex alloc_mutex_; |
| |
|
| | |
| | bool thread_pool_bound_ = false; |
| | }; |
| |
|
| | |
| | mjThreadPool* mju_threadPoolCreate(size_t number_of_threads) { |
| | return reinterpret_cast<mjThreadPool*>(new ThreadPoolImpl(number_of_threads)); |
| | } |
| |
|
| | |
| | static size_t GetNumberOfShards(mjData* d) { |
| | if (!d->threadpool) { |
| | return 1; |
| | } |
| | return mju_threadPoolNumberOfThreads((mjThreadPool*)d->threadpool) + 1; |
| | } |
| |
|
| | |
// Returns a pointer to the mjStackInfo header of thread_id's stack shard
// inside d's arena. Fatal error if no bound thread pool is attached to d.
mjStackInfo* mju_getStackInfoForThread(mjData* d, size_t thread_id) {
  auto thread_pool = (ThreadPoolImpl*)d->threadpool;
  if (!thread_pool || !thread_pool->IsThreadPoolBound()) {
    mju_error("Thread Pool not bound, use mju_bindThreadPool to add an mjThreadPool to mjData");
  }

  // one shard per worker thread plus one for the main thread
  size_t number_of_shards = GetNumberOfShards(d);

  size_t total_arena_size_bytes = d->narena;

  // shards are counted downward from the end of the arena
  uintptr_t end_of_arena_ptr = (uintptr_t)d->arena + total_arena_size_bytes;

  // the divisor 2*number_of_shards reserves half the arena for the shards
  size_t bytes_per_shard = total_arena_size_bytes / (2 * (number_of_shards));

  // round shard size up to a multiple of the destructive-interference
  // (cache line) size so adjacent shards do not share a cache line
  size_t misalignment = bytes_per_shard % mju_getDestructiveInterferenceSize();

  if (misalignment != 0) {
    bytes_per_shard += mju_getDestructiveInterferenceSize() - misalignment;
  }

  // the rounding above can overshoot: verify all shards still fit
  if (bytes_per_shard * number_of_shards > total_arena_size_bytes) {
    mju_error("Arena is not large enough for %zu shards", number_of_shards);
  }

  // low end of shard thread_id, measured down from the end of the arena
  uintptr_t result = (end_of_arena_ptr - (thread_id + 1) * bytes_per_shard);

  // align downward so the mjStackInfo struct itself is properly aligned
  misalignment = result % alignof(mjStackInfo);
  result -= misalignment;
#ifdef ADDRESS_SANITIZER
  // the header must be addressable under ASAN before callers touch it
  ASAN_UNPOISON_MEMORY_REGION((void*)result, sizeof(mjStackInfo));
#endif
  return (mjStackInfo*) result;
}
| |
|
| | |
// Partitions d's arena into per-thread stack shards, writing an mjStackInfo
// header for each. Shard 0 (main thread) inherits the pre-existing stack.
static void ConfigureMultiThreadedStack(mjData* d) {
  if (!d->threadpool) {
    mju_error("No thread pool specified for multithreaded operation");
  }

  size_t number_of_shards = GetNumberOfShards(d);

  // current top of the existing single-threaded stack (pstack bytes used)
  uintptr_t current_limit = (uintptr_t)d->arena + d->narena - d->pstack;

  // shards are carved downward starting at the end of the arena
  uintptr_t begin_shard_cursor_ptr = (uintptr_t)d->arena + d->narena;

  for (size_t shard_index = 0; shard_index < number_of_shards; ++shard_index) {
    mjStackInfo* end_shard_cursor_ptr = mju_getStackInfoForThread(d, shard_index);
#ifdef ADDRESS_SANITIZER
    // make the shard header addressable under ASAN before writing it
    ASAN_UNPOISON_MEMORY_REGION((void*)end_shard_cursor_ptr, sizeof(mjStackInfo));
#endif
    // shard 0 belongs to the main thread and takes over the existing stack
    if (shard_index == 0) {
      // the pre-existing stack contents must fit inside the first shard
      if ((uintptr_t)end_shard_cursor_ptr > current_limit) {
        mju_error("mj_bindThreadPool: sharding stack - existing stack larger than shard size: current_size = %zu, "
                  "max_size = %zu", current_limit, (uintptr_t) end_shard_cursor_ptr);
      }
      end_shard_cursor_ptr->top = current_limit;
      end_shard_cursor_ptr->stack_base = d->pbase;
    } else {
      // worker shards start out empty: top == bottom
      end_shard_cursor_ptr->top = begin_shard_cursor_ptr;
      end_shard_cursor_ptr->stack_base = 0;
    }

    end_shard_cursor_ptr->bottom = begin_shard_cursor_ptr;
    // the stack may grow down only to just past its own mjStackInfo header
    end_shard_cursor_ptr->limit = (uintptr_t)end_shard_cursor_ptr + sizeof(mjStackInfo);
    // next (lower) shard ends one byte below this shard's header
    begin_shard_cursor_ptr = (uintptr_t)end_shard_cursor_ptr - 1;
  }
}
| |
|
| | |
| | void mju_bindThreadPool(mjData* d, void* thread_pool) { |
| | if (d->threadpool) { |
| | mju_error("Thread Pool already bound to mjData"); |
| | } |
| |
|
| | d->threadpool = (uintptr_t) thread_pool; |
| | ((ThreadPoolImpl*)thread_pool)->BindThreadPool(); |
| | ConfigureMultiThreadedStack(d); |
| | } |
| |
|
| | |
| | size_t mju_threadPoolNumberOfThreads(mjThreadPool* thread_pool) { |
| | auto thread_pool_impl = static_cast<ThreadPoolImpl*>(thread_pool); |
| | return thread_pool_impl->NumberOfThreads(); |
| | } |
| |
|
| | size_t mju_threadPoolCurrentWorkerId(mjThreadPool* thread_pool) { |
| | auto thread_pool_impl = static_cast<ThreadPoolImpl*>(thread_pool); |
| | return thread_pool_impl->GetWorkerId(); |
| | } |
| |
|
| | |
| | void mju_threadPoolEnqueue(mjThreadPool* thread_pool, mjTask* task) { |
| | auto thread_pool_impl = static_cast<ThreadPoolImpl*>(thread_pool); |
| | thread_pool_impl->Enqueue(task); |
| | } |
| |
|
| | |
| | void mju_threadPoolDestroy(mjThreadPool* thread_pool) { |
| | auto thread_pool_impl = static_cast<ThreadPoolImpl*>(thread_pool); |
| | thread_pool_impl->Shutdown(); |
| | delete thread_pool_impl; |
| | } |
| |
|
| | |
| | void mju_threadPoolLockAllocMutex(mjThreadPool* thread_pool) { |
| | auto thread_pool_impl = static_cast<ThreadPoolImpl*>(thread_pool); |
| | thread_pool_impl->LockAlloc(); |
| | } |
| |
|
| | |
| | void mju_threadPoolUnlockAllocMutex(mjThreadPool* thread_pool) { |
| | auto thread_pool_impl = static_cast<ThreadPoolImpl*>(thread_pool); |
| | thread_pool_impl->UnlockAlloc(); |
| | } |
| |
|
| | |
// Cache-line size used to pad per-thread data so adjacent shards do not
// falsely share a cache line.
size_t mju_getDestructiveInterferenceSize(void) {
  // hardcoded conservative value — presumably chosen because
  // std::hardware_destructive_interference_size has uneven toolchain
  // support (TODO confirm); 128 covers common 64/128-byte lines
  constexpr size_t kCacheLineBytes = 128;
  return kCacheLineBytes;
}
| |
|
| | } |
| |
|