#ifndef EC_META_SYSTEM_THREADPOOL_HPP
#define EC_META_SYSTEM_THREADPOOL_HPP

#include <atomic>
#include <chrono>
#include <deque>
#include <functional>
#include <list>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <tuple>
#include <vector>

#ifndef NDEBUG
#include <iostream>
#endif

namespace EC {

namespace Internal {
    using TPFnType = std::function<void(void *)>;
    using TPTupleType = std::tuple<TPFnType, void *>;
    using TPQueueType = std::queue<TPTupleType>;
    using ThreadPtr = std::unique_ptr<std::thread>;
    using ThreadStackType = std::vector<std::tuple<ThreadPtr, std::thread::id>>;
    using ThreadStacksType = std::deque<ThreadStackType>;
    using ThreadStacksMutexesT = std::deque<std::mutex>;
    using ThreadCountersT = std::deque<std::atomic_uint>;
    using PointersT =
        std::tuple<ThreadStackType *, std::mutex *, std::atomic_uint *>;
} // namespace Internal

/*!
    \brief Implementation of a Thread Pool.

    Note that if MAXSIZE is less than 2, then ThreadPool will not create
    threads and will run queued functions on the calling thread.
*/
template <unsigned int MAXSIZE>
class ThreadPool {
public:
    ThreadPool()
        : threadStacks{}, threadStackMutexes{}, fnQueue{}, queueMutex{} {}

    ~ThreadPool() {
        while (!isNotRunning()) {
            std::this_thread::sleep_for(std::chrono::microseconds(30));
        }
    }

    /*!
        \brief Queues a function to be called (doesn't start calling yet).

        To run the queued functions, startThreads() must be called to wake
        the waiting threads which will start pulling functions from the
        queue to be called.
    */
    void queueFn(std::function<void(void *)> &&fn, void *ud = nullptr) {
        std::lock_guard<std::mutex> lock(queueMutex);
        fnQueue.emplace(std::make_tuple(fn, ud));
    }

    /*!
        \brief Creates MAXSIZE threads that pull queued functions and run
            them until the queue is empty.

        If MAXSIZE is less than 2, the queued functions are instead run
        sequentially on the calling thread.
    */
    void startThreads() {
        if (MAXSIZE >= 2) {
            checkStacks();
            auto pointers = newStackEntry();
            Internal::ThreadStackType *threadStack = std::get<0>(pointers);
            std::mutex *threadStackMutex = std::get<1>(pointers);
            std::atomic_uint *aCounter = std::get<2>(pointers);
            for (unsigned int i = 0; i < MAXSIZE; ++i) {
                std::thread *newThread = new std::thread(
                    [](Internal::ThreadStackType *threadStack,
                       std::mutex *threadStackMutex,
                       Internal::TPQueueType *fnQueue,
                       std::mutex *queueMutex,
                       std::atomic_uint *initCount) {
                        // add id to idStack "call stack"
                        {
                            std::lock_guard<std::mutex> lock(
                                *threadStackMutex);
                            threadStack->push_back(
                                {Internal::ThreadPtr(nullptr),
                                 std::this_thread::get_id()});
                        }

                        ++(*initCount);

                        // fetch queued fns and execute them
                        // fnTuples must live until end of function
                        std::list<Internal::TPTupleType> fnTuples;
                        do {
                            bool fnFound = false;
                            {
                                std::lock_guard<std::mutex> lock(*queueMutex);
                                if (!fnQueue->empty()) {
                                    fnTuples.emplace_back(
                                        std::move(fnQueue->front()));
                                    fnQueue->pop();
                                    fnFound = true;
                                }
                            }
                            if (fnFound) {
                                std::get<0>(fnTuples.back())(
                                    std::get<1>(fnTuples.back()));
                            } else {
                                break;
                            }
                        } while (true);

                        // pop id from idStack "call stack"
                        do {
                            std::this_thread::sleep_for(
                                std::chrono::microseconds(15));
                            if (initCount->load() != MAXSIZE) {
                                continue;
                            }
                            {
                                std::lock_guard<std::mutex> lock(
                                    *threadStackMutex);
                                if (std::get<1>(threadStack->back()) ==
                                        std::this_thread::get_id()) {
                                    if (!std::get<0>(threadStack->back())) {
                                        continue;
                                    }
                                    std::get<0>(threadStack->back())->detach();
                                    threadStack->pop_back();
                                    if (threadStack->empty()) {
                                        initCount->store(0);
                                    }
                                    break;
                                }
                            }
                        } while (true);
                    },
                    threadStack, threadStackMutex, &fnQueue, &queueMutex,
                    aCounter);
                while (aCounter->load() != i + 1) {
                    std::this_thread::sleep_for(std::chrono::microseconds(15));
                }
                std::lock_guard<std::mutex> stackLock(*threadStackMutex);
                std::get<0>(threadStack->at(i)).reset(newThread);
            }
            while (aCounter->load() != MAXSIZE) {
                std::this_thread::sleep_for(std::chrono::microseconds(15));
            }
        } else {
            sequentiallyRunTasks();
        }
    }

    /*!
        \brief Returns true if the function queue is empty.
    */
    bool isQueueEmpty() {
        std::lock_guard<std::mutex> lock(queueMutex);
        return fnQueue.empty();
    }

    /*!
        \brief Returns the ThreadCount that this class was created with.
    */
    constexpr unsigned int getMaxThreadCount() { return MAXSIZE; }

    /*!
        \brief Starts the threads via startThreads() and blocks until the
            function queue has been emptied.
    */
    void easyStartAndWait() {
        if (MAXSIZE >= 2) {
            startThreads();
            do {
                std::this_thread::sleep_for(std::chrono::microseconds(30));

                bool isQueueEmpty = false;
                {
                    std::lock_guard<std::mutex> lock(queueMutex);
                    isQueueEmpty = fnQueue.empty();
                }

                if (isQueueEmpty) {
                    break;
                }
            } while (true);
        } else {
            sequentiallyRunTasks();
        }
    }

    /*!
        \brief Returns true if no threads created by this pool are still
            running.
    */
    bool isNotRunning() {
        std::lock_guard<std::mutex> lock(dequesMutex);
        auto tIter = threadStacks.begin();
        auto mIter = threadStackMutexes.begin();
        while (tIter != threadStacks.end() &&
               mIter != threadStackMutexes.end()) {
            {
                std::lock_guard<std::mutex> lock(*mIter);
                if (!tIter->empty()) {
                    return false;
                }
            }
            ++tIter;
            ++mIter;
        }
        return true;
    }

private:
    Internal::ThreadStacksType threadStacks;
    Internal::ThreadStacksMutexesT threadStackMutexes;
    Internal::TPQueueType fnQueue;
    std::mutex queueMutex;
    Internal::ThreadCountersT threadCounters;
    std::mutex dequesMutex;

    void sequentiallyRunTasks() {
        // pull functions from queue and run them on current thread
        Internal::TPTupleType fnTuple;
        bool hasFn;
        do {
            {
                std::lock_guard<std::mutex> lock(queueMutex);
                if (!fnQueue.empty()) {
                    hasFn = true;
                    fnTuple = fnQueue.front();
                    fnQueue.pop();
                } else {
                    hasFn = false;
                }
            }
            if (hasFn) {
                std::get<0>(fnTuple)(std::get<1>(fnTuple));
            }
        } while (hasFn);
    }

    void checkStacks() {
        std::lock_guard<std::mutex> lock(dequesMutex);
        if (threadStacks.empty()) {
            return;
        }

        bool erased = false;
        do {
            erased = false;
            {
                std::lock_guard<std::mutex> lock(threadStackMutexes.front());
                if (threadStacks.front().empty()) {
                    threadStacks.pop_front();
                    threadCounters.pop_front();
                    erased = true;
                }
            }
            if (erased) {
                threadStackMutexes.pop_front();
            } else {
                break;
            }
        } while (!threadStacks.empty() && !threadStackMutexes.empty() &&
                 !threadCounters.empty());
    }

    Internal::PointersT newStackEntry() {
        std::lock_guard<std::mutex> lock(dequesMutex);
        threadStacks.emplace_back();
        threadStackMutexes.emplace_back();
        threadCounters.emplace_back();
        threadCounters.back().store(0);

        return {&threadStacks.back(), &threadStackMutexes.back(),
                &threadCounters.back()};
    }
};

} // namespace EC

#endif
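/*
    A minimal usage sketch, not part of the header itself. It assumes this
    file is included as "ThreadPool.hpp"; the pool size of 4, the counter,
    and the queued lambda are arbitrary examples. queueFn() only enqueues
    work; nothing runs until startThreads() or easyStartAndWait() is called.

    #include "ThreadPool.hpp"

    #include <atomic>
    #include <iostream>

    int main() {
        std::atomic_int counter{0};
        {
            EC::ThreadPool<4> pool;  // up to 4 worker threads per start call

            // Queue work; each function receives the user-data pointer given
            // as the second argument to queueFn().
            for (int i = 0; i < 8; ++i) {
                pool.queueFn(
                    [](void *ud) { ++(*static_cast<std::atomic_int *>(ud)); },
                    &counter);
            }

            // Start the worker threads and block until the queue is drained.
            pool.easyStartAndWait();
        }  // ~ThreadPool() waits for all worker threads to finish.

        std::cout << "ran " << counter.load() << " tasks" << std::endl;
        return 0;
    }
*/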