//===--------------------- TaskPool.cpp -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Host/TaskPool.h"
#include "lldb/Host/ThreadLauncher.h"

#include <algorithm>  // for max, min
#include <atomic>     // for atomic
#include <cstdint>    // for uint32_t
#include <functional> // for function
#include <future>     // for future
#include <mutex>      // for mutex, unique_lock
#include <queue>      // for queue
#include <thread>     // for thread
#include <vector>     // for vector

namespace lldb_private {

namespace {
/// Process-wide task pool backing TaskPool::AddTask. Worker threads are
/// created lazily as tasks are queued and exit once the queue drains.
class TaskPoolImpl {
public:
  static TaskPoolImpl &GetInstance();

  void AddTask(std::function<void()> &&task_fn);

private:
  TaskPoolImpl();

  static lldb::thread_result_t WorkerPtr(void *pool);

  static void Worker(TaskPoolImpl *pool);

  std::queue<std::function<void()>> m_tasks;
  std::mutex m_tasks_mutex;
  uint32_t m_thread_count;
};

} // end of anonymous namespace

TaskPoolImpl &TaskPoolImpl::GetInstance() {
  // A function-local static is initialized in a thread-safe way in C++11
  // ("magic statics"), so concurrent first calls here are safe.
  static TaskPoolImpl g_task_pool_impl;
  return g_task_pool_impl;
}

void TaskPool::AddTaskImpl(std::function<void()> &&task_fn) {
  TaskPoolImpl::GetInstance().AddTask(std::move(task_fn));
}
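
// Usage sketch (illustrative; callers normally go through the templated
// TaskPool::AddTask declared in TaskPool.h, which packages the callable and
// returns a std::future for its result):
//
//   std::future<int> f = TaskPool::AddTask([] { return 42; });
//   int value = f.get(); // blocks until a pool worker has run the task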

TaskPoolImpl::TaskPoolImpl() : m_thread_count(0) {}

unsigned GetHardwareConcurrencyHint() {
  // std::thread::hardware_concurrency may return 0 if the value is not well
  // defined or not computable.
  static const unsigned g_hardware_concurrency =
      std::max(1u, std::thread::hardware_concurrency());
  return g_hardware_concurrency;
}

void TaskPoolImpl::AddTask(std::function<void()> &&task_fn) {
  const size_t min_stack_size = 8 * 1024 * 1024;

  std::unique_lock<std::mutex> lock(m_tasks_mutex);
  m_tasks.emplace(std::move(task_fn));
  if (m_thread_count < GetHardwareConcurrencyHint()) {
    m_thread_count++;
    // Note that this Release() call, which detaches the launched thread,
    // needs to happen while m_tasks_mutex is held. This prevents the thread
    // from exiting prematurely and triggering a linux libc bug
    // (https://sourceware.org/bugzilla/show_bug.cgi?id=19951).
    lldb_private::ThreadLauncher::LaunchThread("task-pool.worker", WorkerPtr,
                                               this, nullptr, min_stack_size)
        .Release();
  }
}

lldb::thread_result_t TaskPoolImpl::WorkerPtr(void *pool) {
  Worker(static_cast<TaskPoolImpl *>(pool));
  return 0;
}

void TaskPoolImpl::Worker(TaskPoolImpl *pool) {
  while (true) {
    std::unique_lock<std::mutex> lock(pool->m_tasks_mutex);
    if (pool->m_tasks.empty()) {
      pool->m_thread_count--;
      break;
    }

    std::function<void()> f = std::move(pool->m_tasks.front());
    pool->m_tasks.pop();
    lock.unlock();

    f();
  }
}
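
// Design note: workers do not block waiting for new work. A worker exits as
// soon as the queue is empty, and AddTask spawns a replacement (up to the
// hardware concurrency hint) whenever new tasks arrive.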

void TaskMapOverInt(size_t begin, size_t end,
                    const llvm::function_ref<void(size_t)> &func) {
  const size_t num_workers =
      std::min<size_t>(end, GetHardwareConcurrencyHint());
  std::atomic<size_t> idx{begin};

  auto wrapper = [&idx, end, &func]() {
    while (true) {
      size_t i = idx.fetch_add(1);
      if (i >= end)
        break;
      func(i);
    }
  };

  std::vector<std::future<void>> futures;
  futures.reserve(num_workers);
  for (size_t i = 0; i < num_workers; i++)
    futures.push_back(TaskPool::AddTask(wrapper));
  for (size_t i = 0; i < num_workers; i++)
    futures[i].wait();
}
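
// Usage sketch (hypothetical caller; `items` and `Process` are placeholders):
// map a loop body over the index range [0, items.size()) on the pool.
//
//   TaskMapOverInt(0, items.size(), [&](size_t i) { Process(items[i]); });
//
// Each spawned worker pulls the next unclaimed index from the shared atomic
// counter, so indices are load-balanced rather than pre-partitioned.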

} // namespace lldb_private