diff options
Diffstat (limited to 'src/processor/stackwalker_amd64.cc')
-rw-r--r-- | src/processor/stackwalker_amd64.cc | 142 |
1 file changed, 101 insertions, 41 deletions
diff --git a/src/processor/stackwalker_amd64.cc b/src/processor/stackwalker_amd64.cc index 88c87210..38d8fb66 100644 --- a/src/processor/stackwalker_amd64.cc +++ b/src/processor/stackwalker_amd64.cc @@ -34,15 +34,61 @@ // Author: Mark Mentovai, Ted Mielczarek -#include "processor/stackwalker_amd64.h" #include "google_breakpad/processor/call_stack.h" #include "google_breakpad/processor/memory_region.h" +#include "google_breakpad/processor/source_line_resolver_interface.h" #include "google_breakpad/processor/stack_frame_cpu.h" +#include "processor/cfi_frame_info.h" #include "processor/logging.h" +#include "processor/scoped_ptr.h" +#include "processor/stackwalker_amd64.h" namespace google_breakpad { +const StackwalkerAMD64::CFIWalker::RegisterSet +StackwalkerAMD64::cfi_register_map_[] = { + // It may seem like $rip and $rsp are callee-saves, because the callee is + // responsible for having them restored upon return. But the callee_saves + // flags here really means that the walker should assume they're + // unchanged if the CFI doesn't mention them --- clearly wrong for $rip + // and $rsp. 
+ { "$rax", NULL, false, + StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax }, + { "$rdx", NULL, false, + StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx }, + { "$rcx", NULL, false, + StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx }, + { "$rbx", NULL, true, + StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx }, + { "$rsi", NULL, false, + StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi }, + { "$rdi", NULL, false, + StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi }, + { "$rbp", NULL, true, + StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp }, + { "$rsp", ".cfa", false, + StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp }, + { "$r8", NULL, false, + StackFrameAMD64::CONTEXT_VALID_R8, &MDRawContextAMD64::r8 }, + { "$r9", NULL, false, + StackFrameAMD64::CONTEXT_VALID_R9, &MDRawContextAMD64::r9 }, + { "$r10", NULL, false, + StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 }, + { "$r11", NULL, false, + StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 }, + { "$r12", NULL, true, + StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 }, + { "$r13", NULL, true, + StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 }, + { "$r14", NULL, true, + StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 }, + { "$r15", NULL, true, + StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 }, + { "$rip", ".ra", false, + StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip }, +}; + StackwalkerAMD64::StackwalkerAMD64(const SystemInfo *system_info, const MDRawContextAMD64 *context, MemoryRegion *memory, @@ -50,7 +96,9 @@ StackwalkerAMD64::StackwalkerAMD64(const SystemInfo *system_info, SymbolSupplier *supplier, SourceLineResolverInterface *resolver) : Stackwalker(system_info, memory, modules, supplier, resolver), - context_(context) { + context_(context), + cfi_walker_(cfi_register_map_, + (sizeof(cfi_register_map_) / 
sizeof(cfi_register_map_[0]))) { } @@ -71,6 +119,26 @@ StackFrame* StackwalkerAMD64::GetContextFrame() { return frame; } +StackFrameAMD64 *StackwalkerAMD64::GetCallerByCFIFrameInfo( + const vector<StackFrame *> &frames, + CFIFrameInfo *cfi_frame_info) { + StackFrameAMD64 *last_frame = static_cast<StackFrameAMD64*>(frames.back()); + + scoped_ptr<StackFrameAMD64> frame(new StackFrameAMD64()); + if (!cfi_walker_ + .FindCallerRegisters(*memory_, *cfi_frame_info, + last_frame->context, last_frame->context_validity, + &frame->context, &frame->context_validity)) + return NULL; + + // Make sure we recovered all the essentials. + static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP + | StackFrameAMD64::CONTEXT_VALID_RSP); + if ((frame->context_validity & essentials) != essentials) + return NULL; + + return frame.release(); +} StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack *stack) { if (!memory_ || !stack) { @@ -78,51 +146,43 @@ StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack *stack) { return NULL; } - StackFrameAMD64 *last_frame = static_cast<StackFrameAMD64*>( - stack->frames()->back()); - - //FIXME: this pretty much doesn't work at all due to FPO - // being enabled by default. - // Brain-dead stackwalking: - // %rip_new = *(%rbp_old + 8) - // %rsp_new = %rbp_old + 16 - // %rbp_new = *(%rbp_old) - - // A caller frame must reside higher in memory than its callee frames. - // Anything else is an error, or an indication that we've reached the - // end of the stack. 
- u_int64_t stack_pointer = last_frame->context.rbp + 16; - if (stack_pointer <= last_frame->context.rsp) { - return NULL; - } + const vector<StackFrame *> &frames = *stack->frames(); + StackFrameAMD64 *last_frame = static_cast<StackFrameAMD64 *>(frames.back()); + scoped_ptr<StackFrameAMD64> new_frame; - u_int64_t instruction; - if (!memory_->GetMemoryAtAddress(last_frame->context.rbp + 8, - &instruction) || - instruction <= 1) { - return NULL; + // If we have DWARF CFI information, use it. + if (!new_frame.get()) { + scoped_ptr<CFIFrameInfo> cfi_frame_info(resolver_ + ->FindCFIFrameInfo(last_frame)); + if (cfi_frame_info.get()) + new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get())); } - u_int64_t stack_base; - if (!memory_->GetMemoryAtAddress(last_frame->context.rbp, - &stack_base) || - stack_base <= 1) { + // If nothing worked, tell the caller. + if (!new_frame.get()) + return NULL; + + // Treat an instruction address of 0 as end-of-stack. + if (new_frame->context.rip == 0) return NULL; - } - - StackFrameAMD64 *frame = new StackFrameAMD64(); - - frame->context = last_frame->context; - frame->context.rip = instruction; - frame->context.rsp = stack_pointer; - frame->context.rbp = stack_base; - frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP | - StackFrameAMD64::CONTEXT_VALID_RSP | - StackFrameAMD64::CONTEXT_VALID_RBP; - frame->instruction = frame->context.rip - 1; + // If the new stack pointer is at a lower address than the old, then + // that's clearly incorrect. Treat this as end-of-stack to enforce + // progress and avoid infinite loops. + if (new_frame->context.rsp <= last_frame->context.rsp) + return NULL; - return frame; + // new_frame->context.rip is the return address, which is one instruction + // past the CALL that caused us to arrive at the callee. Set + // new_frame->instruction to one less than that. 
This won't reference the + // beginning of the CALL instruction, but it's guaranteed to be within + // the CALL, which is sufficient to get the source line information to + // match up with the line that contains a function call. Callers that + // require the exact return address value may access the context.rip + // field of StackFrameAMD64. + new_frame->instruction = new_frame->context.rip - 1; + + return new_frame.release(); } |