1 //===-- llvm/Support/ThreadPool.h - A ThreadPool implementation -*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines a crude C++11 based thread pool.
11 //===----------------------------------------------------------------------===//
13 #ifndef LLVM_SUPPORT_THREADPOOL_H
14 #define LLVM_SUPPORT_THREADPOOL_H
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/thread.h"

#include <condition_variable>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <utility>
32 /// A ThreadPool for asynchronous parallel execution on a defined number of
35 /// The pool keeps a vector of threads alive, waiting on a condition variable
36 /// for some work to become available.
39 /// Construct a pool using the hardware strategy \p S for mapping hardware
40 /// execution resources (threads, cores, CPUs)
41 /// Defaults to using the maximum execution resources in the system, but
42 /// accounting for the affinity mask.
43 ThreadPool(ThreadPoolStrategy S = hardware_concurrency());
45 /// Blocking destructor: the pool will wait for all the threads to complete.
48 /// Asynchronous submission of a task to the pool. The returned future can be
49 /// used to wait for the task to finish and is *non-blocking* on destruction.
50 template <typename Function, typename... Args>
51 inline auto async(Function &&F, Args &&...ArgList) {
53 std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
54 return async(std::move(Task));
57 /// Asynchronous submission of a task to the pool. The returned future can be
58 /// used to wait for the task to finish and is *non-blocking* on destruction.
59 template <typename Func>
60 auto async(Func &&F) -> std::shared_future<decltype(F())> {
61 return asyncImpl(std::function<decltype(F())()>(std::forward<Func>(F)));
64 /// Blocking wait for all the threads to complete and the queue to be empty.
65 /// It is an error to try to add new tasks while blocking on this call.
68 // TODO: misleading legacy name warning!
69 // Returns the maximum number of worker threads in the pool, not the current
71 unsigned getThreadCount() const { return MaxThreadCount; }
73 /// Returns true if the current thread is a worker thread of this thread pool.
74 bool isWorkerThread() const;
77 /// Helpers to create a promise and a callable wrapper of \p Task that sets
78 /// the result of the promise. Returns the callable and a future to access the
80 template <typename ResTy>
81 static std::pair<std::function<void()>, std::future<ResTy>>
82 createTaskAndFuture(std::function<ResTy()> Task) {
83 std::shared_ptr<std::promise<ResTy>> Promise =
84 std::make_shared<std::promise<ResTy>>();
85 auto F = Promise->get_future();
87 [Promise = std::move(Promise), Task]() { Promise->set_value(Task()); },
90 static std::pair<std::function<void()>, std::future<void>>
91 createTaskAndFuture(std::function<void()> Task) {
92 std::shared_ptr<std::promise<void>> Promise =
93 std::make_shared<std::promise<void>>();
94 auto F = Promise->get_future();
95 return {[Promise = std::move(Promise), Task]() {
102 bool workCompletedUnlocked() { return !ActiveThreads && Tasks.empty(); }
104 /// Asynchronous submission of a task to the pool. The returned future can be
105 /// used to wait for the task to finish and is *non-blocking* on destruction.
106 template <typename ResTy>
107 std::shared_future<ResTy> asyncImpl(std::function<ResTy()> Task) {
109 #if LLVM_ENABLE_THREADS
110 /// Wrap the Task in a std::function<void()> that sets the result of the
111 /// corresponding future.
112 auto R = createTaskAndFuture(Task);
114 int requestedThreads;
116 // Lock the queue and push the new task
117 std::unique_lock<std::mutex> LockGuard(QueueLock);
119 // Don't allow enqueueing after disabling the pool
120 assert(EnableFlag && "Queuing a thread during ThreadPool destruction");
121 Tasks.push(std::move(R.first));
122 requestedThreads = ActiveThreads + Tasks.size();
124 QueueCondition.notify_one();
125 grow(requestedThreads);
126 return R.second.share();
128 #else // LLVM_ENABLE_THREADS Disabled
130 // Get a Future with launch::deferred execution using std::async
131 auto Future = std::async(std::launch::deferred, std::move(Task)).share();
132 // Wrap the future so that both ThreadPool::wait() can operate and the
133 // returned future can be sync'ed on.
134 Tasks.push([Future]() { Future.get(); });
139 #if LLVM_ENABLE_THREADS
140 // Grow to ensure that we have at least `requested` Threads, but do not go
141 // over MaxThreadCount.
142 void grow(int requested);
145 /// Threads in flight
146 std::vector<llvm::thread> Threads;
147 /// Lock protecting access to the Threads vector.
148 mutable std::mutex ThreadsLock;
150 /// Tasks waiting for execution in the pool.
151 std::queue<std::function<void()>> Tasks;
153 /// Locking and signaling for accessing the Tasks queue.
154 std::mutex QueueLock;
155 std::condition_variable QueueCondition;
157 /// Signaling for job completion
158 std::condition_variable CompletionCondition;
160 /// Keep track of the number of thread actually busy
161 unsigned ActiveThreads = 0;
163 #if LLVM_ENABLE_THREADS // avoids warning for unused variable
164 /// Signal for the destruction of the pool, asking thread to exit.
165 bool EnableFlag = true;
168 const ThreadPoolStrategy Strategy;
170 /// Maximum number of threads to potentially grow this pool to.
171 const unsigned MaxThreadCount;
175 #endif // LLVM_SUPPORT_THREADPOOL_H