1 //===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Created by Greg Clayton on 6/19/07.
12 //===----------------------------------------------------------------------===//
14 #include "MachThreadList.h"
17 #include <sys/sysctl.h>
20 #include "DNBThreadResumeActions.h"
21 #include "MachProcess.h"
23 MachThreadList::MachThreadList()
24 : m_threads(), m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
27 MachThreadList::~MachThreadList() {}
29 nub_state_t MachThreadList::GetState(nub_thread_t tid) {
30 MachThreadSP thread_sp(GetThreadByID(tid));
32 return thread_sp->GetState();
36 const char *MachThreadList::GetName(nub_thread_t tid) {
37 MachThreadSP thread_sp(GetThreadByID(tid));
39 return thread_sp->GetName();
43 ThreadInfo::QoS MachThreadList::GetRequestedQoS(nub_thread_t tid,
45 uint64_t dti_qos_class_index) {
46 MachThreadSP thread_sp(GetThreadByID(tid));
48 return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
49 return ThreadInfo::QoS();
52 nub_addr_t MachThreadList::GetPThreadT(nub_thread_t tid) {
53 MachThreadSP thread_sp(GetThreadByID(tid));
55 return thread_sp->GetPThreadT();
56 return INVALID_NUB_ADDRESS;
59 nub_addr_t MachThreadList::GetDispatchQueueT(nub_thread_t tid) {
60 MachThreadSP thread_sp(GetThreadByID(tid));
62 return thread_sp->GetDispatchQueueT();
63 return INVALID_NUB_ADDRESS;
66 nub_addr_t MachThreadList::GetTSDAddressForThread(
67 nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset,
68 uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size) {
69 MachThreadSP thread_sp(GetThreadByID(tid));
71 return thread_sp->GetTSDAddressForThread(
72 plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset,
73 plo_pthread_tsd_entry_size);
74 return INVALID_NUB_ADDRESS;
77 nub_thread_t MachThreadList::SetCurrentThread(nub_thread_t tid) {
78 MachThreadSP thread_sp(GetThreadByID(tid));
80 m_current_thread = thread_sp;
83 return INVALID_NUB_THREAD;
86 bool MachThreadList::GetThreadStoppedReason(
87 nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const {
88 MachThreadSP thread_sp(GetThreadByID(tid));
90 return thread_sp->GetStopException().GetStopInfo(stop_info);
94 bool MachThreadList::GetIdentifierInfo(
95 nub_thread_t tid, thread_identifier_info_data_t *ident_info) {
96 thread_t mach_port_number = GetMachPortNumberByThreadID(tid);
98 mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
99 return ::thread_info(mach_port_number, THREAD_IDENTIFIER_INFO,
100 (thread_info_t)ident_info, &count) == KERN_SUCCESS;
103 void MachThreadList::DumpThreadStoppedReason(nub_thread_t tid) const {
104 MachThreadSP thread_sp(GetThreadByID(tid));
106 thread_sp->GetStopException().DumpStopReason();
109 const char *MachThreadList::GetThreadInfo(nub_thread_t tid) const {
110 MachThreadSP thread_sp(GetThreadByID(tid));
112 return thread_sp->GetBasicInfoAsString();
116 MachThreadSP MachThreadList::GetThreadByID(nub_thread_t tid) const {
117 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
118 MachThreadSP thread_sp;
119 const size_t num_threads = m_threads.size();
120 for (size_t idx = 0; idx < num_threads; ++idx) {
121 if (m_threads[idx]->ThreadID() == tid) {
122 thread_sp = m_threads[idx];
130 MachThreadList::GetThreadByMachPortNumber(thread_t mach_port_number) const {
131 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
132 MachThreadSP thread_sp;
133 const size_t num_threads = m_threads.size();
134 for (size_t idx = 0; idx < num_threads; ++idx) {
135 if (m_threads[idx]->MachPortNumber() == mach_port_number) {
136 thread_sp = m_threads[idx];
144 MachThreadList::GetThreadIDByMachPortNumber(thread_t mach_port_number) const {
145 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
146 MachThreadSP thread_sp;
147 const size_t num_threads = m_threads.size();
148 for (size_t idx = 0; idx < num_threads; ++idx) {
149 if (m_threads[idx]->MachPortNumber() == mach_port_number) {
150 return m_threads[idx]->ThreadID();
153 return INVALID_NUB_THREAD;
156 thread_t MachThreadList::GetMachPortNumberByThreadID(
157 nub_thread_t globally_unique_id) const {
158 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
159 MachThreadSP thread_sp;
160 const size_t num_threads = m_threads.size();
161 for (size_t idx = 0; idx < num_threads; ++idx) {
162 if (m_threads[idx]->ThreadID() == globally_unique_id) {
163 return m_threads[idx]->MachPortNumber();
169 bool MachThreadList::GetRegisterValue(nub_thread_t tid, uint32_t set,
171 DNBRegisterValue *reg_value) const {
172 MachThreadSP thread_sp(GetThreadByID(tid));
174 return thread_sp->GetRegisterValue(set, reg, reg_value);
179 bool MachThreadList::SetRegisterValue(nub_thread_t tid, uint32_t set,
181 const DNBRegisterValue *reg_value) const {
182 MachThreadSP thread_sp(GetThreadByID(tid));
184 return thread_sp->SetRegisterValue(set, reg, reg_value);
189 nub_size_t MachThreadList::GetRegisterContext(nub_thread_t tid, void *buf,
191 MachThreadSP thread_sp(GetThreadByID(tid));
193 return thread_sp->GetRegisterContext(buf, buf_len);
197 nub_size_t MachThreadList::SetRegisterContext(nub_thread_t tid, const void *buf,
199 MachThreadSP thread_sp(GetThreadByID(tid));
201 return thread_sp->SetRegisterContext(buf, buf_len);
205 uint32_t MachThreadList::SaveRegisterState(nub_thread_t tid) {
206 MachThreadSP thread_sp(GetThreadByID(tid));
208 return thread_sp->SaveRegisterState();
212 bool MachThreadList::RestoreRegisterState(nub_thread_t tid, uint32_t save_id) {
213 MachThreadSP thread_sp(GetThreadByID(tid));
215 return thread_sp->RestoreRegisterState(save_id);
219 nub_size_t MachThreadList::NumThreads() const {
220 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
221 return m_threads.size();
224 nub_thread_t MachThreadList::ThreadIDAtIndex(nub_size_t idx) const {
225 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
226 if (idx < m_threads.size())
227 return m_threads[idx]->ThreadID();
228 return INVALID_NUB_THREAD;
231 nub_thread_t MachThreadList::CurrentThreadID() {
232 MachThreadSP thread_sp;
233 CurrentThread(thread_sp);
235 return thread_sp->ThreadID();
236 return INVALID_NUB_THREAD;
239 bool MachThreadList::NotifyException(MachException::Data &exc) {
240 MachThreadSP thread_sp(GetThreadByMachPortNumber(exc.thread_port));
242 thread_sp->NotifyException(exc);
248 void MachThreadList::Clear() {
249 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
254 MachThreadList::UpdateThreadList(MachProcess *process, bool update,
255 MachThreadList::collection *new_threads) {
256 // locker will keep a mutex locked until it goes out of scope
257 DNBLogThreadedIf(LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, "
258 "update = %u) process stop count = %u",
259 process->ProcessID(), update, process->StopCount());
260 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
262 if (process->StopCount() == 0) {
263 int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID()};
264 struct kinfo_proc processInfo;
265 size_t bufsize = sizeof(processInfo);
266 if (sysctl(mib, (unsigned)(sizeof(mib) / sizeof(int)), &processInfo,
267 &bufsize, NULL, 0) == 0 &&
269 if (processInfo.kp_proc.p_flag & P_LP64)
272 #if defined(__i386__) || defined(__x86_64__)
274 DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
276 DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
277 #elif defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
279 DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
281 DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
285 if (m_threads.empty() || update) {
286 thread_array_t thread_list = NULL;
287 mach_msg_type_number_t thread_list_count = 0;
288 task_t task = process->Task().TaskPort();
289 DNBError err(::task_threads(task, &thread_list, &thread_list_count),
290 DNBError::MachKernel);
292 if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
293 err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, "
294 "thread_list_count => %u )",
295 task, thread_list, thread_list_count);
297 if (err.Error() == KERN_SUCCESS && thread_list_count > 0) {
298 MachThreadList::collection currThreads;
300 // Iterator through the current thread list and see which threads
301 // we already have in our list (keep them), which ones we don't
302 // (add them), and which ones are not around anymore (remove them).
303 for (idx = 0; idx < thread_list_count; ++idx) {
304 const thread_t mach_port_num = thread_list[idx];
306 uint64_t unique_thread_id =
307 MachThread::GetGloballyUniqueThreadIDForMachPortID(mach_port_num);
308 MachThreadSP thread_sp(GetThreadByID(unique_thread_id));
310 // Keep the existing thread class
311 currThreads.push_back(thread_sp);
313 // We don't have this thread, lets add it.
314 thread_sp.reset(new MachThread(process, m_is_64_bit, unique_thread_id,
317 // Add the new thread regardless of its is user ready state...
318 // Make sure the thread is ready to be displayed and shown to users
319 // before we add this thread to our list...
320 if (thread_sp->IsUserReady()) {
322 new_threads->push_back(thread_sp);
324 currThreads.push_back(thread_sp);
329 m_threads.swap(currThreads);
330 m_current_thread.reset();
332 // Free the vm memory given to us by ::task_threads()
333 vm_size_t thread_list_size =
334 (vm_size_t)(thread_list_count * sizeof(thread_t));
335 ::vm_deallocate(::mach_task_self(), (vm_address_t)thread_list,
339 return static_cast<uint32_t>(m_threads.size());
342 void MachThreadList::CurrentThread(MachThreadSP &thread_sp) {
343 // locker will keep a mutex locked until it goes out of scope
344 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
345 if (m_current_thread.get() == NULL) {
346 // Figure out which thread is going to be our current thread.
347 // This is currently done by finding the first thread in the list
348 // that has a valid exception.
349 const size_t num_threads = m_threads.size();
350 for (uint32_t idx = 0; idx < num_threads; ++idx) {
351 if (m_threads[idx]->GetStopException().IsValid()) {
352 m_current_thread = m_threads[idx];
357 thread_sp = m_current_thread;
360 void MachThreadList::Dump() const {
361 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
362 const size_t num_threads = m_threads.size();
363 for (uint32_t idx = 0; idx < num_threads; ++idx) {
364 m_threads[idx]->Dump(idx);
368 void MachThreadList::ProcessWillResume(
369 MachProcess *process, const DNBThreadResumeActions &thread_actions) {
370 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
372 // Update our thread list, because sometimes libdispatch or the kernel
373 // will spawn threads while a task is suspended.
374 MachThreadList::collection new_threads;
376 // First figure out if we were planning on running only one thread, and if so
377 // force that thread to resume.
379 nub_thread_t solo_thread = INVALID_NUB_THREAD;
380 if (thread_actions.GetSize() > 0 &&
381 thread_actions.NumActionsWithState(eStateStepping) +
382 thread_actions.NumActionsWithState(eStateRunning) ==
384 run_one_thread = true;
385 const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
386 size_t num_actions = thread_actions.GetSize();
387 for (size_t i = 0; i < num_actions; i++, action_ptr++) {
388 if (action_ptr->state == eStateStepping ||
389 action_ptr->state == eStateRunning) {
390 solo_thread = action_ptr->tid;
395 run_one_thread = false;
397 UpdateThreadList(process, true, &new_threads);
399 DNBThreadResumeAction resume_new_threads = {-1U, eStateRunning, 0,
400 INVALID_NUB_ADDRESS};
401 // If we are planning to run only one thread, any new threads should be
404 resume_new_threads.state = eStateSuspended;
406 const size_t num_new_threads = new_threads.size();
407 const size_t num_threads = m_threads.size();
408 for (uint32_t idx = 0; idx < num_threads; ++idx) {
409 MachThread *thread = m_threads[idx].get();
410 bool handled = false;
411 for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx) {
412 if (thread == new_threads[new_idx].get()) {
413 thread->ThreadWillResume(&resume_new_threads);
420 const DNBThreadResumeAction *thread_action =
421 thread_actions.GetActionForThread(thread->ThreadID(), true);
422 // There must always be a thread action for every thread.
423 assert(thread_action);
424 bool others_stopped = false;
425 if (solo_thread == thread->ThreadID())
426 others_stopped = true;
427 thread->ThreadWillResume(thread_action, others_stopped);
431 if (new_threads.size()) {
432 for (uint32_t idx = 0; idx < num_new_threads; ++idx) {
434 LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) "
435 "stop-id=%u, resuming newly discovered thread: "
436 "0x%8.8" PRIx64 ", thread-is-user-ready=%i)",
437 process->ProcessID(), process->StopCount(),
438 new_threads[idx]->ThreadID(), new_threads[idx]->IsUserReady());
443 uint32_t MachThreadList::ProcessDidStop(MachProcess *process) {
444 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
445 // Update our thread list
446 const uint32_t num_threads = UpdateThreadList(process, true);
447 for (uint32_t idx = 0; idx < num_threads; ++idx) {
448 m_threads[idx]->ThreadDidStop();
453 //----------------------------------------------------------------------
454 // Check each thread in our thread list to see if we should notify our
455 // client of the current halt in execution.
457 // Breakpoints can have callback functions associated with them that
458 // can return true to stop, or false to continue executing the inferior.
461 // true if we should stop and notify our clients
462 // false if we should resume our child process and skip notification
463 //----------------------------------------------------------------------
464 bool MachThreadList::ShouldStop(bool &step_more) {
465 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
466 uint32_t should_stop = false;
467 const size_t num_threads = m_threads.size();
468 for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx) {
469 should_stop = m_threads[idx]->ShouldStop(step_more);
474 void MachThreadList::NotifyBreakpointChanged(const DNBBreakpoint *bp) {
475 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
476 const size_t num_threads = m_threads.size();
477 for (uint32_t idx = 0; idx < num_threads; ++idx) {
478 m_threads[idx]->NotifyBreakpointChanged(bp);
483 MachThreadList::EnableHardwareBreakpoint(const DNBBreakpoint *bp) const {
485 const size_t num_threads = m_threads.size();
486 for (uint32_t idx = 0; idx < num_threads; ++idx)
487 m_threads[idx]->EnableHardwareBreakpoint(bp);
489 return INVALID_NUB_HW_INDEX;
492 bool MachThreadList::DisableHardwareBreakpoint(const DNBBreakpoint *bp) const {
494 const size_t num_threads = m_threads.size();
495 for (uint32_t idx = 0; idx < num_threads; ++idx)
496 m_threads[idx]->DisableHardwareBreakpoint(bp);
501 // DNBWatchpointSet() -> MachProcess::CreateWatchpoint() ->
502 // MachProcess::EnableWatchpoint()
503 // -> MachThreadList::EnableHardwareWatchpoint().
505 MachThreadList::EnableHardwareWatchpoint(const DNBBreakpoint *wp) const {
506 uint32_t hw_index = INVALID_NUB_HW_INDEX;
508 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
509 const size_t num_threads = m_threads.size();
510 // On Mac OS X we have to prime the control registers for new threads. We
512 // using the control register data for the first thread, for lack of a
513 // better way of choosing.
514 bool also_set_on_task = true;
515 for (uint32_t idx = 0; idx < num_threads; ++idx) {
516 if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(
517 wp, also_set_on_task)) == INVALID_NUB_HW_INDEX) {
518 // We know that idx failed for some reason. Let's rollback the
519 // transaction for [0, idx).
520 for (uint32_t i = 0; i < idx; ++i)
521 m_threads[i]->RollbackTransForHWP();
522 return INVALID_NUB_HW_INDEX;
524 also_set_on_task = false;
526 // Notify each thread to commit the pending transaction.
527 for (uint32_t idx = 0; idx < num_threads; ++idx)
528 m_threads[idx]->FinishTransForHWP();
533 bool MachThreadList::DisableHardwareWatchpoint(const DNBBreakpoint *wp) const {
535 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
536 const size_t num_threads = m_threads.size();
538 // On Mac OS X we have to prime the control registers for new threads. We
540 // using the control register data for the first thread, for lack of a
541 // better way of choosing.
542 bool also_set_on_task = true;
543 for (uint32_t idx = 0; idx < num_threads; ++idx) {
544 if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task)) {
545 // We know that idx failed for some reason. Let's rollback the
546 // transaction for [0, idx).
547 for (uint32_t i = 0; i < idx; ++i)
548 m_threads[i]->RollbackTransForHWP();
551 also_set_on_task = false;
553 // Notify each thread to commit the pending transaction.
554 for (uint32_t idx = 0; idx < num_threads; ++idx)
555 m_threads[idx]->FinishTransForHWP();
562 uint32_t MachThreadList::NumSupportedHardwareWatchpoints() const {
563 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
564 const size_t num_threads = m_threads.size();
565 // Use an arbitrary thread to retrieve the number of supported hardware
568 return m_threads[0]->NumSupportedHardwareWatchpoints();
572 uint32_t MachThreadList::GetThreadIndexForThreadStoppedWithSignal(
573 const int signo) const {
574 PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
575 uint32_t should_stop = false;
576 const size_t num_threads = m_threads.size();
577 for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx) {
578 if (m_threads[idx]->GetStopException().SoftSignal() == signo)