]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - contrib/llvm-project/lldb/source/Host/common/TaskPool.cpp
Merge llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and openmp
[FreeBSD/FreeBSD.git] / contrib / llvm-project / lldb / source / Host / common / TaskPool.cpp
1 //===--------------------- TaskPool.cpp -------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "lldb/Host/TaskPool.h"
10 #include "lldb/Host/ThreadLauncher.h"
11 #include "lldb/Utility/Log.h"
12
13 #include <cstdint>
14 #include <queue>
15 #include <thread>
16
17 namespace lldb_private {
18
19 namespace {
// Process-wide, lazily-grown thread pool backing TaskPool.  Worker threads
// are spawned on demand by AddTask() (up to GetHardwareConcurrencyHint())
// and exit when the queue drains; see Worker() below.
class TaskPoolImpl {
public:
  // Returns the single process-wide pool instance.
  static TaskPoolImpl &GetInstance();

  // Enqueues a task and, if the pool is below its thread budget, launches a
  // new detached worker thread to service the queue.
  void AddTask(std::function<void()> &&task_fn);

private:
  TaskPoolImpl();

  // C-style thread entry point; forwards to Worker().
  static lldb::thread_result_t WorkerPtr(void *pool);

  // Worker loop: pops and runs tasks until the queue is empty.
  static void Worker(TaskPoolImpl *pool);

  // Pending tasks, guarded by m_tasks_mutex.
  std::queue<std::function<void()>> m_tasks;
  // Guards m_tasks and m_thread_count.
  std::mutex m_tasks_mutex;
  // Number of live worker threads; only read/written under m_tasks_mutex.
  uint32_t m_thread_count;
};
37
38 } // end of anonymous namespace
39
40 TaskPoolImpl &TaskPoolImpl::GetInstance() {
41   static TaskPoolImpl g_task_pool_impl;
42   return g_task_pool_impl;
43 }
44
45 void TaskPool::AddTaskImpl(std::function<void()> &&task_fn) {
46   TaskPoolImpl::GetInstance().AddTask(std::move(task_fn));
47 }
48
49 TaskPoolImpl::TaskPoolImpl() : m_thread_count(0) {}
50
// Returns a best-effort count of hardware threads, never less than 1.
unsigned GetHardwareConcurrencyHint() {
  // std::thread::hardware_concurrency may return 0 if the value is not
  // well defined or not computable; clamp so callers always get at least
  // one worker.  Cached: the answer cannot change during the process.
  static const unsigned g_hint =
      std::max(1u, std::thread::hardware_concurrency());
  return g_hint;
}
58
// Enqueues \p task_fn and, if the pool is below its thread budget
// (GetHardwareConcurrencyHint()), launches one more detached worker.
// The entire body runs under m_tasks_mutex; see the note below for why the
// launch/Release must happen while the lock is held.
void TaskPoolImpl::AddTask(std::function<void()> &&task_fn) {
  // 8 MiB stack for workers, since tasks may run arbitrary user code.
  const size_t min_stack_size = 8 * 1024 * 1024;

  std::unique_lock<std::mutex> lock(m_tasks_mutex);
  m_tasks.emplace(std::move(task_fn));
  if (m_thread_count < GetHardwareConcurrencyHint()) {
    // Optimistically count the thread before launching; Worker() decrements
    // it (under this same mutex) when it runs out of tasks.
    m_thread_count++;
    // Note that this detach call needs to happen with the m_tasks_mutex held.
    // This prevents the thread from exiting prematurely and triggering a linux
    // libc bug (https://sourceware.org/bugzilla/show_bug.cgi?id=19951).
    llvm::Expected<HostThread> host_thread =
        lldb_private::ThreadLauncher::LaunchThread(
            "task-pool.worker", WorkerPtr, this, min_stack_size);
    if (host_thread) {
      // Detach: the worker owns itself and exits when the queue drains.
      host_thread->Release();
    } else {
      // Launch failed: log it.  The task stays queued and will be picked up
      // by an existing worker, if any.
      LLDB_LOG(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_HOST),
               "failed to launch host thread: {}",
               llvm::toString(host_thread.takeError()));
    }
  }
}
81
82 lldb::thread_result_t TaskPoolImpl::WorkerPtr(void *pool) {
83   Worker((TaskPoolImpl *)pool);
84   return {};
85 }
86
// Worker loop: repeatedly pops a task and runs it (without the lock held).
// When the queue is empty the thread decrements m_thread_count *under the
// mutex* and exits, so AddTask() can accurately decide whether to spawn a
// replacement.
void TaskPoolImpl::Worker(TaskPoolImpl *pool) {
  while (true) {
    std::unique_lock<std::mutex> lock(pool->m_tasks_mutex);
    if (pool->m_tasks.empty()) {
      // No work left: retire this thread.  Must happen before releasing the
      // lock so the count never overstates the live workers to AddTask().
      pool->m_thread_count--;
      break;
    }

    // Take ownership of the front task, then drop the lock so other workers
    // (and AddTask) can proceed while it runs.
    std::function<void()> f = std::move(pool->m_tasks.front());
    pool->m_tasks.pop();
    lock.unlock();

    f();
  }
}
102
103 void TaskMapOverInt(size_t begin, size_t end,
104                     const llvm::function_ref<void(size_t)> &func) {
105   const size_t num_workers = std::min<size_t>(end, GetHardwareConcurrencyHint());
106   std::atomic<size_t> idx{begin};
107   
108   auto wrapper = [&idx, end, &func]() {
109     while (true) {
110       size_t i = idx.fetch_add(1);
111       if (i >= end)
112         break;
113       func(i);
114     }
115   };
116
117   std::vector<std::future<void>> futures;
118   futures.reserve(num_workers);
119   for (size_t i = 0; i < num_workers; i++)
120     futures.push_back(TaskPool::AddTask(wrapper));
121   for (size_t i = 0; i < num_workers; i++)
122     futures[i].wait();
123 }
124
125 } // namespace lldb_private
126