1 //===-- StackFrameList.cpp --------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 #include "lldb/Target/StackFrameList.h"
10 #include "lldb/Breakpoint/Breakpoint.h"
11 #include "lldb/Breakpoint/BreakpointLocation.h"
12 #include "lldb/Core/SourceManager.h"
13 #include "lldb/Core/StreamFile.h"
14 #include "lldb/Symbol/Block.h"
15 #include "lldb/Symbol/Function.h"
16 #include "lldb/Symbol/Symbol.h"
17 #include "lldb/Target/Process.h"
18 #include "lldb/Target/RegisterContext.h"
19 #include "lldb/Target/StackFrame.h"
20 #include "lldb/Target/StopInfo.h"
21 #include "lldb/Target/Target.h"
22 #include "lldb/Target/Thread.h"
23 #include "lldb/Target/Unwind.h"
24 #include "lldb/Utility/Log.h"
25 #include "llvm/ADT/SmallPtrSet.h"
29 //#define DEBUG_STACK_FRAMES 1
32 using namespace lldb_private;
34 // StackFrameList constructor
35 StackFrameList::StackFrameList(Thread &thread,
36 const lldb::StackFrameListSP &prev_frames_sp,
37 bool show_inline_frames)
38 : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
39 m_selected_frame_idx(0), m_concrete_frames_fetched(0),
40 m_current_inlined_depth(UINT32_MAX),
41 m_current_inlined_pc(LLDB_INVALID_ADDRESS),
42 m_show_inlined_frames(show_inline_frames) {
44 m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
45 m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
49 StackFrameList::~StackFrameList() {
50 // Call clear since this takes a lock and clears the stack frame list in case
51 // another thread is currently using this stack frame list
55 void StackFrameList::CalculateCurrentInlinedDepth() {
56 uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
57 if (cur_inlined_depth == UINT32_MAX) {
58 ResetCurrentInlinedDepth();
62 uint32_t StackFrameList::GetCurrentInlinedDepth() {
63 if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
64 lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
65 if (cur_pc != m_current_inlined_pc) {
66 m_current_inlined_pc = LLDB_INVALID_ADDRESS;
67 m_current_inlined_depth = UINT32_MAX;
68 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
69 if (log && log->GetVerbose())
71 "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
73 return m_current_inlined_depth;
// Recompute m_current_inlined_depth / m_current_inlined_pc for the current
// stop. Decides which inlined frame the user should appear to be stopped in,
// based on *why* the thread stopped (crash/signal vs. breakpoint vs. step).
void StackFrameList::ResetCurrentInlinedDepth() {
  // Inlined-depth tracking only applies when inline frames are shown.
  if (!m_show_inlined_frames)
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  // Frame 0 not inlined: nothing to track — reset to the sentinel values.
  if (!m_frames[0]->IsInlined()) {
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.
  Block *block_ptr = m_frames[0]->GetFrameBlock();
  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
  // Only adjust the depth when the PC sits exactly at the start of the
  // block's address range (i.e. at the beginning of an inlined call).
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest most block. Otherwise,
  // we stopped here naturally as the result of a step, so set ourselves in the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this break point is doing, and set the inline
    // depth appropriately. Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, I set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints I do to the bottom of the
    // stack, since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
    for (uint32_t i = 0; i < num_owners; i++) {
      Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
      // A single user (non-internal) breakpoint is enough to keep the old
      // deepest-frame behavior.
      if (!bp_ref.IsInternal()) {
        all_internal = false;
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    // Otherwise, we should set ourselves at the container of the inlining, so
    // that the user can descend into them. So first we check whether we have
    // more than one inlined block sharing this PC:
    int num_inlined_functions = 0;
    // Walk outward through the chain of inlined parents, counting how many
    // inlined blocks all start at this same PC.
    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
      if (pc_as_address != containing_range.GetBaseAddress())
      num_inlined_functions++;
    m_current_inlined_pc = curr_pc;
    // +1 puts the user in the concrete containing frame of the inline chain.
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf("ResetCurrentInlinedDepth: setting inlined "
                  "depth: %d 0x%" PRIx64 ".\n",
                  m_current_inlined_depth, curr_pc);
196 bool StackFrameList::DecrementCurrentInlinedDepth() {
197 if (m_show_inlined_frames) {
198 uint32_t current_inlined_depth = GetCurrentInlinedDepth();
199 if (current_inlined_depth != UINT32_MAX) {
200 if (current_inlined_depth > 0) {
201 m_current_inlined_depth--;
209 void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
210 m_current_inlined_depth = new_depth;
211 if (new_depth == UINT32_MAX)
212 m_current_inlined_pc = LLDB_INVALID_ADDRESS;
214 m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
217 void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
219 assert(m_thread.IsValid() && "Expected valid thread");
220 assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");
222 if (end_idx < m_concrete_frames_fetched)
228 uint32_t num_frames = unwinder->GetFramesUpTo(end_idx);
229 if (num_frames <= end_idx + 1) {
231 m_concrete_frames_fetched = UINT32_MAX;
234 // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
235 // which can lazily query the unwinder to create frames.
236 m_frames.resize(num_frames);
/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
static void FindInterveningFrames(Function &begin, Function &end,
                                  Target &target, addr_t return_pc,
                                  std::vector<Function *> &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);
  // Find a non-tail calling edge with the correct return PC.
  auto first_level_edges = begin.GetCallEdges();
  for (const CallEdge &edge : first_level_edges)
    LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
             edge.GetReturnPCAddress(begin, target));
  // Binary search: the edges are searched by their return PC address.
  // NOTE(review): this presumes GetCallEdges() returns edges sorted by
  // return PC — confirm against Function::GetCallEdges.
  auto first_edge_it = std::lower_bound(
      first_level_edges.begin(), first_level_edges.end(), return_pc,
      [&](const CallEdge &edge, addr_t target_pc) {
        return edge.GetReturnPCAddress(begin, target) < target_pc;
  if (first_edge_it == first_level_edges.end() ||
      first_edge_it->GetReturnPCAddress(begin, target) != return_pc) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
  CallEdge &first_edge = const_cast<CallEdge &>(*first_edge_it);

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge.GetCallee(images);
    LLDB_LOG(log, "Could not resolve callee");
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  std::vector<Function *> active_path = {};
  std::vector<Function *> solution_path = {};
  llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
  bool ambiguous = false;

  DFS(Function *end, ModuleList &images) : end(end), images(images) {}

  // Entry point: on an unambiguous solution, move it into \p path.
  void search(Function *first_callee, std::vector<Function *> &path) {
    path = std::move(solution_path);

  void dfs(Function *callee) {
    // Found a path to the target function.
    if (solution_path.empty())
      solution_path = active_path;
    // Terminate the search if tail recursion is found, or more generally if
    // there's more than one way to reach a target. This errs on the side of
    // caution: it conservatively stops searching when some solutions are
    // still possible to save time in the average case.
    if (!visited_nodes.insert(callee).second) {
    // Search the calls made from this callee.
    active_path.push_back(callee);
    for (CallEdge &edge : callee->GetTailCallingEdges()) {
      Function *next_callee = edge.GetCallee(images);
    active_path.pop_back();

  DFS(&end, images).search(first_callee, path);
/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
/// | ... | <- Completed frames.
/// | ... | <- Artificial frames inserted here.
/// | ... | <- Not-yet-visited frames.
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  TargetSP target_sp = next_frame.CalculateTarget();
  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));

  assert(!m_frames.empty() && "Cannot synthesize frames in an empty stack");
  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");

  // Try to find the unique sequence of (tail) calls which led from next_frame
  std::vector<Function *> path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  FindInterveningFrames(*next_func, *prev_func, target, return_pc, path, images,

  // Push synthetic tail call frames. Walk the path in reverse so the frame
  // nearest the caller is pushed first.
  for (Function *callee : llvm::reverse(path)) {
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    // Tail-call frames have no real CFA of their own.
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
        callee->GetAddressRange().GetBaseAddress().GetLoadAddress(&target);
    callee->CalculateSymbolContext(&sc);
    // Kind::Artificial marks these frames as synthesized, not unwound.
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0}", callee->GetDisplayName());

  // If any frames were created, adjust next_frame's index.
  next_frame.SetFrameIndex(m_frames.size());
// Populate m_frames with concrete + inlined (+ synthesized tail-call) frames
// up to index \p end_idx (UINT32_MAX = fetch the whole stack). When the
// whole stack has been fetched, merge reusable frames from the previous
// stop's list so StackFrame pointers remain stable across stops.
void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
  // We've already gotten more frames than asked for, or we've already finished
  // unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
  Unwind *unwinder = m_thread.GetUnwinder();
  // Without inline frames the cheap concrete-only path suffices.
  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch. However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it... And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...
  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      end_idx += inlined_depth;

  StackFrameSP unwind_frame_sp;
    // idx is the concrete (unwind) index of the frame being fetched.
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    // We might have already created frame zero, only create it if we need
    if (m_frames.empty()) {
      RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());
          unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
      // There shouldn't be any way not to get the frame info for frame
      // 0. But if the unwinder can't make one, lets make one by hand
      // with the SP as the CFA and see if that gets any further.
        cfa = reg_ctx_sp->GetSP();
        pc = reg_ctx_sp->GetPC();
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
      m_frames.push_back(unwind_frame_sp);
      // Frame zero already exists: reuse it and its CFA.
      unwind_frame_sp = m_frames.front();
      cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
          unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
      const bool cfa_is_valid = true;
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, StackFrame::Kind::Regular, nullptr);
      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());
      m_frames.push_back(unwind_frame_sp);

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
      Address curr_frame_address(unwind_frame_sp->GetFrameCodeAddress());
      TargetSP target_sp = m_thread.CalculateTarget();
      // Be sure to adjust the frame address to match the address that was
      // used to lookup the symbol context above. If we are in the first
      // concrete frame, then we lookup using the current address, else we
      // decrement the address by one to get the correct location.
        if (curr_frame_address.GetOffset() == 0) {
          // If curr_frame_address points to the first address in a section
          // then after adjustment it will point to an other section. In that
          // case resolve the address again to the correct section plus
          addr_t load_addr = curr_frame_address.GetOpcodeLoadAddress(
              target_sp.get(), AddressClass::eCode);
          curr_frame_address.SetOpcodeLoadAddress(
              load_addr - 1, target_sp.get(), AddressClass::eCode);
          curr_frame_address.Slide(-1);

      // Expand the chain of inlined scopes at this PC into one synthetic
      // frame per inlined function, sharing the concrete frame's registers.
      SymbolContext next_frame_sc;
      Address next_frame_address;
      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        StackFrameSP frame_sp(
            new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx,
                           unwind_frame_sp->GetRegisterContextSP(), cfa,
                           next_frame_address, &next_frame_sc));
        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    // Walk both lists bottom-up (stack base first); matching StackIDs mean
    // the same frame survived the stop, so keep the previous object to
    // preserve pointer identity for clients holding StackFrame pointers.
    size_t curr_frame_num, prev_frame_num;
    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);
#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
        curr_frame_sp->Dump(&s, true, false);
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
        prev_frame_sp->Dump(&s, true, false);
        s.PutCString("NULL");
      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();
      if (curr_frame == nullptr || prev_frame == nullptr)
      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;
#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n Copying previous frame to current frame");
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\n\nNew frames:\n");
608 uint32_t StackFrameList::GetNumFrames(bool can_create) {
609 std::lock_guard<std::recursive_mutex> guard(m_mutex);
612 GetFramesUpTo(UINT32_MAX);
614 return GetVisibleStackFrameIndex(m_frames.size());
617 void StackFrameList::Dump(Stream *s) {
621 std::lock_guard<std::recursive_mutex> guard(m_mutex);
623 const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
624 for (pos = begin; pos != end; ++pos) {
625 StackFrame *frame = (*pos).get();
626 s->Printf("%p: ", static_cast<void *>(frame));
628 frame->GetStackID().Dump(s);
629 frame->DumpUsingSettingsFormat(s);
631 s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
// Return the frame at user-visible index \p idx, creating frames lazily as
// needed. The index is first shifted by the current inlined depth so the
// "virtual" inline position maps onto the real frame vector.
StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;
  // Translate the user-visible index into an index into m_frames.
  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;
  // Fast path: the frame is already cached.
  if (idx < m_frames.size())
    frame_sp = m_frames[idx];
  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there weren't then you asked for too many frames.
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      frame_sp = m_frames[idx];
      // Concrete-only mode: build the frame directly from unwinder info.
      Unwind *unwinder = m_thread.GetUnwinder();
        if (unwinder->GetFrameInfoAtIndex(idx, cfa, pc)) {
          const bool cfa_is_valid = true;
          frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc,
              StackFrame::Kind::Regular, nullptr);
              frame_sp->GetSymbolContext(eSymbolContextFunction).function;
            // When we aren't showing inline functions we always use the top
            // most function block as the scope.
            frame_sp->SetSymbolContextScope(&function->GetBlock(false));
            // Set the symbol scope from the symbol regardless if it is nullptr
            frame_sp->SetSymbolContextScope(
                frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
      // Cache the newly-built frame for later lookups.
      SetFrameAtIndex(idx, frame_sp);
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought taking that into account, then reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames, that should not ever
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
705 StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
706 // First try assuming the unwind index is the same as the frame index. The
707 // unwind index is always greater than or equal to the frame index, so it is
708 // a good place to start. If we have inlined frames we might have 5 concrete
709 // frames (frame unwind indexes go from 0-4), but we might have 15 frames
710 // after we make all the inlined frames. Most of the time the unwind frame
711 // index (or the concrete frame index) is the same as the frame index.
712 uint32_t frame_idx = unwind_idx;
713 StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
715 if (frame_sp->GetFrameIndex() == unwind_idx)
717 frame_sp = GetFrameAtIndex(++frame_idx);
722 static bool CompareStackID(const StackFrameSP &stack_sp,
723 const StackID &stack_id) {
724 return stack_sp->GetStackID() < stack_id;
727 StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
728 StackFrameSP frame_sp;
730 if (stack_id.IsValid()) {
731 std::lock_guard<std::recursive_mutex> guard(m_mutex);
732 uint32_t frame_idx = 0;
733 // Do a binary search in case the stack frame is already in our cache
734 collection::const_iterator begin = m_frames.begin();
735 collection::const_iterator end = m_frames.end();
737 collection::const_iterator pos =
738 std::lower_bound(begin, end, stack_id, CompareStackID);
740 if ((*pos)->GetStackID() == stack_id)
745 frame_sp = GetFrameAtIndex(frame_idx);
746 if (frame_sp && frame_sp->GetStackID() == stack_id)
754 bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
755 if (idx >= m_frames.size())
756 m_frames.resize(idx + 1);
757 // Make sure allocation succeeded by checking bounds again
758 if (idx < m_frames.size()) {
759 m_frames[idx] = frame_sp;
762 return false; // resize failed, out of memory?
765 uint32_t StackFrameList::GetSelectedFrameIndex() const {
766 std::lock_guard<std::recursive_mutex> guard(m_mutex);
767 return m_selected_frame_idx;
770 uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
771 std::lock_guard<std::recursive_mutex> guard(m_mutex);
773 const_iterator begin = m_frames.begin();
774 const_iterator end = m_frames.end();
775 m_selected_frame_idx = 0;
776 for (pos = begin; pos != end; ++pos) {
777 if (pos->get() == frame) {
778 m_selected_frame_idx = std::distance(begin, pos);
779 uint32_t inlined_depth = GetCurrentInlinedDepth();
780 if (inlined_depth != UINT32_MAX)
781 m_selected_frame_idx -= inlined_depth;
785 SetDefaultFileAndLineToSelectedFrame();
786 return m_selected_frame_idx;
789 bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
790 std::lock_guard<std::recursive_mutex> guard(m_mutex);
791 StackFrameSP frame_sp(GetFrameAtIndex(idx));
793 SetSelectedFrame(frame_sp.get());
799 void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
800 if (m_thread.GetID() ==
801 m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
802 StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
804 SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
805 if (sc.line_entry.file)
806 m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
807 sc.line_entry.file, sc.line_entry.line);
812 // The thread has been run, reset the number stack frames to zero so we can
813 // determine how many frames we have lazily.
814 void StackFrameList::Clear() {
815 std::lock_guard<std::recursive_mutex> guard(m_mutex);
817 m_concrete_frames_fetched = 0;
// Merge a freshly-computed frame list (\p curr_up) with the list from the
// previous stop (\p prev_sp), leaving the result in \p prev_sp. Both lists
// are locked for the duration of the merge.
void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_up,
                           lldb::StackFrameListSP &prev_sp) {
  std::unique_lock<std::recursive_mutex> current_lock, previous_lock;
    current_lock = std::unique_lock<std::recursive_mutex>(curr_up->m_mutex);
    previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex);

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n");
    s.PutCString("NULL");
  s.PutCString("\nCurr:\n");
    s.PutCString("NULL");

  // No current frames: keep whatever the previous list had.
  if (!curr_up || curr_up->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No current frames, leave previous frames alone...\n");

  if (!prev_sp || prev_sp->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No previous frames, so use current frames...\n");
    // We either don't have any previous frames, or since we have more than one
    // current frames it means we have all the frames and can safely replace
    // our previous frames.
    prev_sp.reset(curr_up.release());

  const uint32_t num_curr_frames = curr_up->GetNumFrames(false);

  if (num_curr_frames > 1) {
#if defined(DEBUG_STACK_FRAMES)
        "We have more than one current frame, so use current frames...\n");
    // We have more than one current frames it means we have all the frames and
    // can safely replace our previous frames.
    prev_sp.reset(curr_up.release());

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nMerged:\n");

  // Single current frame: compare frame 0 of both lists by StackID.
  StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0));
  StackFrameSP curr_frame_zero_sp(curr_up->GetFrameAtIndex(0));
  StackID curr_stack_id(curr_frame_zero_sp->GetStackID());
  StackID prev_stack_id(prev_frame_zero_sp->GetStackID());

#if defined(DEBUG_STACK_FRAMES)
  const uint32_t num_prev_frames = prev_sp->GetNumFrames(false);
  s.Printf("\n%u previous frames with one current frame\n", num_prev_frames);

  // We have only a single current frame
  // Our previous stack frames only had a single frame as well...
  if (curr_stack_id == prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nPrevious frame #0 is same as current frame #0, merge the "
    // Same frame survived the stop: refresh it in place.
    curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame(
        *prev_frame_zero_sp);
    // prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame
    // (*curr_frame_zero_sp);
    // prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp);
  } else if (curr_stack_id < prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous "
             "frame #0, insert current frame zero in front of previous\n");
    // The stack grew (deeper frame): prepend the new frame 0.
    prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp);

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\nMerged:\n");
920 StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
922 const_iterator begin = m_frames.begin();
923 const_iterator end = m_frames.end();
924 lldb::StackFrameSP ret_sp;
926 for (pos = begin; pos != end; ++pos) {
927 if (pos->get() == stack_frame_ptr) {
// Print up to \p num_frames frames starting at \p first_frame into \p strm,
// optionally prefixing the selected frame with \p selected_frame_marker
// (other frames get an equal-width blank marker so columns line up).
// Returns the number of frames actually displayed.
size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
  const char *unselected_marker = nullptr;
  // Build a blank marker the same width as the selected-frame marker so
  // unselected rows stay aligned.
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
        marker = unselected_marker;
    // NOTE(review): `(first_frame - frame_idx)` looks reversed — with
    // unsigned arithmetic it underflows for frame_idx > first_frame, so
    // source is only shown for the first frame; confirm whether
    // `frame_idx - first_frame < num_frames_with_source` was intended.
    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
    ++num_frames_displayed;
  return num_frames_displayed;