1 //===-- RegisterContext_x86_64.cpp -------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
14 #include "lldb/Core/DataBufferHeap.h"
15 #include "lldb/Core/DataExtractor.h"
16 #include "lldb/Core/RegisterValue.h"
17 #include "lldb/Core/Scalar.h"
18 #include "lldb/Target/Target.h"
19 #include "lldb/Target/Thread.h"
20 #include "lldb/Host/Endian.h"
21 #include "llvm/Support/Compiler.h"
23 #include "ProcessPOSIX.h"
24 #if defined(__linux__) or defined(__FreeBSD__)
25 #include "ProcessMonitor.h"
27 #include "RegisterContext_i386.h"
28 #include "RegisterContext_x86.h"
29 #include "RegisterContext_x86_64.h"
30 #include "Plugins/Process/elf-core/ProcessElfCore.h"
32 using namespace lldb_private;
35 // Support ptrace extensions even when compiled without required kernel support
37 #define NT_X86_XSTATE 0x202
42 gcc_dwarf_gpr_rax = 0,
179 uint32_t g_gpr_regnums[k_num_gpr_registers] =
217 static const uint32_t
218 g_fpu_regnums[k_num_fpr_registers] =
256 static const uint32_t
257 g_avx_regnums[k_num_avx_registers] =
277 // Number of register sets provided by this context.
280 k_num_extended_register_sets = 1,
281 k_num_register_sets = 3
284 static const RegisterSet
285 g_reg_sets[k_num_register_sets] =
287 { "General Purpose Registers", "gpr", k_num_gpr_registers, g_gpr_regnums },
288 { "Floating Point Registers", "fpu", k_num_fpr_registers, g_fpu_regnums },
289 { "Advanced Vector Extensions", "avx", k_num_avx_registers, g_avx_regnums }
292 // Computes the offset of the given FPR in the extended data area.
293 #define FPR_OFFSET(regname) \
294 (offsetof(RegisterContext_x86_64::FPR, xstate) + \
295 offsetof(RegisterContext_x86_64::FXSAVE, regname))
297 // Computes the offset of the YMM register assembled from register halves.
298 #define YMM_OFFSET(regname) \
299 (offsetof(RegisterContext_x86_64::YMM, regname))
301 // Number of bytes needed to represent a i386 GPR
302 #define GPR_i386_SIZE(reg) sizeof(((RegisterContext_i386::GPR*)NULL)->reg)
304 // Number of bytes needed to represent a FPR.
305 #define FPR_SIZE(reg) sizeof(((RegisterContext_x86_64::FXSAVE*)NULL)->reg)
307 // Number of bytes needed to represent the i'th FP register.
308 #define FP_SIZE sizeof(((RegisterContext_x86_64::MMSReg*)NULL)->bytes)
310 // Number of bytes needed to represent an XMM register.
311 #define XMM_SIZE sizeof(RegisterContext_x86_64::XMMReg)
313 // Number of bytes needed to represent a YMM register.
314 #define YMM_SIZE sizeof(RegisterContext_x86_64::YMMReg)
316 // Note that the size and offset will be updated by platform-specific classes.
317 #define DEFINE_GPR(reg, alt, kind1, kind2, kind3, kind4) \
318 { #reg, alt, 0, 0, eEncodingUint, \
319 eFormatHex, { kind1, kind2, kind3, kind4, gpr_##reg }, NULL, NULL }
321 // Dummy data for RegisterInfo::value_regs as expected by DumpRegisterSet.
322 static uint32_t value_regs = LLDB_INVALID_REGNUM;
324 #define DEFINE_GPR_i386(reg_i386, reg_x86_64, alt, kind1, kind2, kind3, kind4) \
325 { #reg_i386, alt, GPR_i386_SIZE(reg_i386), 0, eEncodingUint, \
326 eFormatHex, { kind1, kind2, kind3, kind4, gpr_##reg_i386 }, &value_regs, NULL }
328 #define DEFINE_FPR(reg, kind1, kind2, kind3, kind4) \
329 { #reg, NULL, FPR_SIZE(reg), FPR_OFFSET(reg), eEncodingUint, \
330 eFormatHex, { kind1, kind2, kind3, kind4, fpu_##reg }, NULL, NULL }
332 #define DEFINE_FP(reg, i) \
333 { #reg#i, NULL, FP_SIZE, LLVM_EXTENSION FPR_OFFSET(reg[i]), \
334 eEncodingVector, eFormatVectorOfUInt8, \
335 { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
336 LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }
338 #define DEFINE_XMM(reg, i) \
339 { #reg#i, NULL, XMM_SIZE, LLVM_EXTENSION FPR_OFFSET(reg[i]), \
340 eEncodingVector, eFormatVectorOfUInt8, \
341 { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
342 LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }
344 #define DEFINE_YMM(reg, i) \
345 { #reg#i, NULL, YMM_SIZE, LLVM_EXTENSION YMM_OFFSET(reg[i]), \
346 eEncodingVector, eFormatVectorOfUInt8, \
347 { gcc_dwarf_fpu_##reg##i, gcc_dwarf_fpu_##reg##i, \
348 LLDB_INVALID_REGNUM, gdb_fpu_##reg##i, fpu_##reg##i }, NULL, NULL }
350 #define DEFINE_DR(reg, i) \
351 { #reg#i, NULL, 0, 0, eEncodingUint, eFormatHex, \
352 { LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, \
353 LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM }, NULL, NULL }
355 #define REG_CONTEXT_SIZE (GetGPRSize() + sizeof(RegisterContext_x86_64::FPR))
358 g_register_infos[k_num_registers] =
360 // General purpose registers.
361 DEFINE_GPR(rax, NULL, gcc_dwarf_gpr_rax, gcc_dwarf_gpr_rax, LLDB_INVALID_REGNUM, gdb_gpr_rax),
362 DEFINE_GPR(rbx, NULL, gcc_dwarf_gpr_rbx, gcc_dwarf_gpr_rbx, LLDB_INVALID_REGNUM, gdb_gpr_rbx),
363 DEFINE_GPR(rcx, NULL, gcc_dwarf_gpr_rcx, gcc_dwarf_gpr_rcx, LLDB_INVALID_REGNUM, gdb_gpr_rcx),
364 DEFINE_GPR(rdx, NULL, gcc_dwarf_gpr_rdx, gcc_dwarf_gpr_rdx, LLDB_INVALID_REGNUM, gdb_gpr_rdx),
365 DEFINE_GPR(rdi, NULL, gcc_dwarf_gpr_rdi, gcc_dwarf_gpr_rdi, LLDB_INVALID_REGNUM, gdb_gpr_rdi),
366 DEFINE_GPR(rsi, NULL, gcc_dwarf_gpr_rsi, gcc_dwarf_gpr_rsi, LLDB_INVALID_REGNUM, gdb_gpr_rsi),
367 DEFINE_GPR(rbp, "fp", gcc_dwarf_gpr_rbp, gcc_dwarf_gpr_rbp, LLDB_REGNUM_GENERIC_FP, gdb_gpr_rbp),
368 DEFINE_GPR(rsp, "sp", gcc_dwarf_gpr_rsp, gcc_dwarf_gpr_rsp, LLDB_REGNUM_GENERIC_SP, gdb_gpr_rsp),
369 DEFINE_GPR(r8, NULL, gcc_dwarf_gpr_r8, gcc_dwarf_gpr_r8, LLDB_INVALID_REGNUM, gdb_gpr_r8),
370 DEFINE_GPR(r9, NULL, gcc_dwarf_gpr_r9, gcc_dwarf_gpr_r9, LLDB_INVALID_REGNUM, gdb_gpr_r9),
371 DEFINE_GPR(r10, NULL, gcc_dwarf_gpr_r10, gcc_dwarf_gpr_r10, LLDB_INVALID_REGNUM, gdb_gpr_r10),
372 DEFINE_GPR(r11, NULL, gcc_dwarf_gpr_r11, gcc_dwarf_gpr_r11, LLDB_INVALID_REGNUM, gdb_gpr_r11),
373 DEFINE_GPR(r12, NULL, gcc_dwarf_gpr_r12, gcc_dwarf_gpr_r12, LLDB_INVALID_REGNUM, gdb_gpr_r12),
374 DEFINE_GPR(r13, NULL, gcc_dwarf_gpr_r13, gcc_dwarf_gpr_r13, LLDB_INVALID_REGNUM, gdb_gpr_r13),
375 DEFINE_GPR(r14, NULL, gcc_dwarf_gpr_r14, gcc_dwarf_gpr_r14, LLDB_INVALID_REGNUM, gdb_gpr_r14),
376 DEFINE_GPR(r15, NULL, gcc_dwarf_gpr_r15, gcc_dwarf_gpr_r15, LLDB_INVALID_REGNUM, gdb_gpr_r15),
377 DEFINE_GPR(rip, "pc", gcc_dwarf_gpr_rip, gcc_dwarf_gpr_rip, LLDB_REGNUM_GENERIC_PC, gdb_gpr_rip),
378 DEFINE_GPR(rflags, "flags", LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_REGNUM_GENERIC_FLAGS, gdb_gpr_rflags),
379 DEFINE_GPR(cs, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_cs),
380 DEFINE_GPR(fs, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_fs),
381 DEFINE_GPR(gs, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_gs),
382 DEFINE_GPR(ss, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_ss),
383 DEFINE_GPR(ds, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_ds),
384 DEFINE_GPR(es, NULL, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_gpr_es),
386 DEFINE_GPR_i386(eax, rax, NULL, gcc_eax, dwarf_eax, LLDB_INVALID_REGNUM, gdb_eax),
387 DEFINE_GPR_i386(ebx, rbx, NULL, gcc_ebx, dwarf_ebx, LLDB_INVALID_REGNUM, gdb_ebx),
388 DEFINE_GPR_i386(ecx, rcx, NULL, gcc_ecx, dwarf_ecx, LLDB_INVALID_REGNUM, gdb_ecx),
389 DEFINE_GPR_i386(edx, rdx, NULL, gcc_edx, dwarf_edx, LLDB_INVALID_REGNUM, gdb_edx),
390 DEFINE_GPR_i386(edi, rdi, NULL, gcc_edi, dwarf_edi, LLDB_INVALID_REGNUM, gdb_edi),
391 DEFINE_GPR_i386(esi, rsi, NULL, gcc_esi, dwarf_esi, LLDB_INVALID_REGNUM, gdb_esi),
392 DEFINE_GPR_i386(ebp, rbp, "fp", gcc_ebp, dwarf_ebp, LLDB_REGNUM_GENERIC_FP, gdb_ebp),
393 DEFINE_GPR_i386(esp, rsp, "sp", gcc_esp, dwarf_esp, LLDB_REGNUM_GENERIC_SP, gdb_esp),
394 DEFINE_GPR_i386(eip, rip, "pc", gcc_eip, dwarf_eip, LLDB_REGNUM_GENERIC_PC, gdb_eip),
395 DEFINE_GPR_i386(eflags, rflags, "flags", gcc_eflags, dwarf_eflags, LLDB_REGNUM_GENERIC_FLAGS, gdb_eflags),
396 // i387 Floating point registers.
397 DEFINE_FPR(fcw, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fcw),
398 DEFINE_FPR(fsw, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fsw),
399 DEFINE_FPR(ftw, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ftw),
400 DEFINE_FPR(fop, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_fop),
401 DEFINE_FPR(ip, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ip),
402 // FIXME: Extract segment from ip.
403 DEFINE_FPR(ip, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_cs_64),
404 DEFINE_FPR(dp, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_dp),
405 // FIXME: Extract segment from dp.
406 DEFINE_FPR(dp, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_ds_64),
407 DEFINE_FPR(mxcsr, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, gdb_fpu_mxcsr),
408 DEFINE_FPR(mxcsrmask, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM),
438 // Copy of YMM registers assembled from xmm and ymmh
456 // Debug registers for lldb internal use
// File-scope predicates that classify an LLDB register number by the index
// range it occupies in g_register_infos (bounds come from k_last_gpr,
// k_first_avx/k_last_avx, k_first_fpr/k_last_fpr).
467 static bool IsGPR(unsigned reg)
469 return reg <= k_last_gpr; // GPR's come first.
// True when 'reg' is one of the ymm (AVX) registers.
472 static bool IsAVX(unsigned reg)
474 return (k_first_avx <= reg && reg <= k_last_avx);
// True when 'reg' is in the generic (FXSAVE-visible) floating-point range.
476 static bool IsFPR(unsigned reg)
478 return (k_first_fpr <= reg && reg <= k_last_fpr);
// Mode-aware FPR test: in eXSAVE mode the AVX (ymm) registers are treated as
// floating-point registers too, since they live in the extended xsave area.
482 bool RegisterContext_x86_64::IsFPR(unsigned reg, FPRType fpr_type)
484 bool generic_fpr = ::IsFPR(reg);
485 if (fpr_type == eXSAVE)
486 return generic_fpr || IsAVX(reg);
// Constructor: points m_iovec at the xsave buffer, zeroes the cached FPR
// state, and probes the inferior's floating-point capability: it optimistically
// assumes XSAVE and falls back to FXSAVE if the initial ReadFPR() fails.
491 RegisterContext_x86_64::RegisterContext_x86_64(Thread &thread,
492 uint32_t concrete_frame_idx)
493 : RegisterContextPOSIX(thread, concrete_frame_idx)
495 // Initialize m_iovec to point to the buffer and buffer size
496 // using the conventions of Berkeley style UIO structures, as required
497 // by PTRACE extensions.
498 m_iovec.iov_base = &m_fpr.xstate.xsave;
499 m_iovec.iov_len = sizeof(m_fpr.xstate.xsave);
501 ::memset(&m_fpr, 0, sizeof(RegisterContext_x86_64::FPR));
503 // elf-core yet to support ReadFPR()
504 ProcessSP base = CalculateProcess();
505 if (base.get()->GetPluginName() == ProcessElfCore::GetPluginNameStatic())
508 // TODO: Use assembly to call cpuid on the inferior and query ebx or ecx
509 m_fpr_type = eXSAVE; // extended floating-point registers, if available
510 if (false == ReadFPR())
511 m_fpr_type = eFXSAVE; // assume generic floating-point registers
// Destructor — no explicit cleanup beyond member destruction is visible here.
514 RegisterContext_x86_64::~RegisterContext_x86_64()
// Cache-invalidation hooks required by the RegisterContext interface.
519 RegisterContext_x86_64::Invalidate()
524 RegisterContext_x86_64::InvalidateAllRegisters()
// Byte offset of 'reg' within the register cache, per the info table.
529 RegisterContext_x86_64::GetRegisterOffset(unsigned reg)
531 assert(reg < k_num_registers && "Invalid register number.");
532 return GetRegisterInfo()[reg].byte_offset;
// Size in bytes of 'reg', per the info table.
536 RegisterContext_x86_64::GetRegisterSize(unsigned reg)
538 assert(reg < k_num_registers && "Invalid register number.");
539 return GetRegisterInfo()[reg].byte_size;
// Total registers exposed: GPRs + FPRs always, plus the AVX set only when
// the inferior supports XSAVE.
543 RegisterContext_x86_64::GetRegisterCount()
545 size_t num_registers = k_num_gpr_registers + k_num_fpr_registers;
546 if (m_fpr_type == eXSAVE)
547 return num_registers + k_num_avx_registers;
548 return num_registers;
// Accessor for the register-info table; virtual so platform subclasses can
// substitute a specialized copy (see comment below).
552 RegisterContext_x86_64::GetRegisterInfo()
554 // Commonly, this method is overridden and g_register_infos is copied and specialized.
555 // So, use GetRegisterInfo() rather than g_register_infos in this scope.
556 return g_register_infos;
// Bounds-checked lookup into the register-info table.
560 RegisterContext_x86_64::GetRegisterInfoAtIndex(size_t reg)
562 if (reg < k_num_registers)
563 return &GetRegisterInfo()[reg];
// Counts only the register sets actually available on this target
// (the AVX set is excluded when XSAVE is unsupported).
569 RegisterContext_x86_64::GetRegisterSetCount()
572 for (size_t set = 0; set < k_num_register_sets; ++set)
573 if (IsRegisterSetAvailable(set))
// Returns the requested set from g_reg_sets, or falls through when the set
// (e.g. AVX without XSAVE) is unavailable.
580 RegisterContext_x86_64::GetRegisterSet(size_t set)
582 if (IsRegisterSetAvailable(set))
583 return &g_reg_sets[set];
// Reverse lookup: linear scan of the info table for a matching byte_offset.
// Asserts (rather than returning a sentinel) when no register matches.
589 RegisterContext_x86_64::GetRegisterIndexFromOffset(unsigned offset)
592 for (reg = 0; reg < k_num_registers; reg++)
594 if (GetRegisterInfo()[reg].byte_offset == offset)
597 assert(reg < k_num_registers && "Invalid register offset.");
// Name of 'reg' from the info table.
// NOTE(review): the assert message says "offset" but the check is on a
// register *number* — message text left unchanged here.
602 RegisterContext_x86_64::GetRegisterName(unsigned reg)
604 assert(reg < k_num_registers && "Invalid register offset.");
605 return GetRegisterInfo()[reg].name;
// Byte order of the inferior process; eByteOrderInvalid when no process
// can be resolved.
609 RegisterContext_x86_64::GetByteOrder()
611 // Get the target process whose privileged thread was used for the register read.
612 lldb::ByteOrder byte_order = eByteOrderInvalid;
613 Process *process = CalculateProcess().get();
616 byte_order = process->GetByteOrder();
620 // Split a cached ymm value (m_ymm_set) into its two halves: the low 128 bits
// go to fxsave.xmm[...].bytes, the high 128 bits to xsave.ymmh[...].bytes.
// Which half of the source buffer maps to which destination depends on the
// inferior's byte order; returns false for an unsupported/invalid order.
621 bool RegisterContext_x86_64::CopyYMMtoXSTATE(uint32_t reg, lldb::ByteOrder byte_order)
626 if (byte_order == eByteOrderLittle) {
627 ::memcpy(m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
628 m_ymm_set.ymm[reg - fpu_ymm0].bytes,
629 sizeof(RegisterContext_x86_64::XMMReg));
630 ::memcpy(m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
631 m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
632 sizeof(RegisterContext_x86_64::YMMHReg));
636 if (byte_order == eByteOrderBig) {
637 ::memcpy(m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
638 m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
639 sizeof(RegisterContext_x86_64::XMMReg));
640 ::memcpy(m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
641 m_ymm_set.ymm[reg - fpu_ymm0].bytes,
642 sizeof(RegisterContext_x86_64::YMMHReg));
645 return false; // unsupported or invalid byte order
648 // Concatenate xmm.bytes with ymmh.bytes
// Inverse of CopyYMMtoXSTATE: assemble the full 256-bit ymm value in
// m_ymm_set from the xmm (low) and ymmh (high) halves, honoring the
// inferior's byte order. Returns false for an unsupported/invalid order.
649 bool RegisterContext_x86_64::CopyXSTATEtoYMM(uint32_t reg, lldb::ByteOrder byte_order)
654 if (byte_order == eByteOrderLittle) {
655 ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes,
656 m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
657 sizeof(RegisterContext_x86_64::XMMReg));
658 ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
659 m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
660 sizeof(RegisterContext_x86_64::YMMHReg));
663 if (byte_order == eByteOrderBig) {
664 ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes + sizeof(RegisterContext_x86_64::XMMReg),
665 m_fpr.xstate.fxsave.xmm[reg - fpu_ymm0].bytes,
666 sizeof(RegisterContext_x86_64::XMMReg));
667 ::memcpy(m_ymm_set.ymm[reg - fpu_ymm0].bytes,
668 m_fpr.xstate.xsave.ymmh[reg - fpu_ymm0].bytes,
669 sizeof(RegisterContext_x86_64::YMMHReg));
672 return false; // unsupported or invalid byte order
// A register set is available if its index is below the count of base sets,
// extended (AVX) sets counting only when the inferior supports XSAVE.
676 RegisterContext_x86_64::IsRegisterSetAvailable(size_t set_index)
678 // Note: Extended register sets are assumed to be at the end of g_reg_sets...
679 size_t num_sets = k_num_register_sets - k_num_extended_register_sets;
680 if (m_fpr_type == eXSAVE) // ...and to start with AVX registers.
683 return (set_index < num_sets);
// Reads a register described by 'reg_info' into 'value'. Scalar registers go
// through the by-number overload; vector registers (stmm/xmm/ymm) are served
// as raw bytes from the cached FPR state, with ymm values assembled on the
// fly from their xmm/ymmh halves when XSAVE is available.
687 RegisterContext_x86_64::ReadRegister(const RegisterInfo *reg_info, RegisterValue &value)
692 const uint32_t reg = reg_info->kinds[eRegisterKindLLDB];
694 if (IsFPR(reg, m_fpr_type)) {
699 bool success = ReadRegister(reg, value);
701 // If an i386 register should be parsed from an x86_64 register...
702 if (success && reg >= k_first_i386 && reg <= k_last_i386)
703 if (value.GetByteSize() > reg_info->byte_size)
704 value.SetType(reg_info); // ...use the type specified by reg_info rather than the uint64_t default
708 if (reg_info->encoding == eEncodingVector) {
709 ByteOrder byte_order = GetByteOrder();
711 if (byte_order != ByteOrder::eByteOrderInvalid) {
712 if (reg >= fpu_stmm0 && reg <= fpu_stmm7) {
713 value.SetBytes(m_fpr.xstate.fxsave.stmm[reg - fpu_stmm0].bytes, reg_info->byte_size, byte_order);
715 if (reg >= fpu_xmm0 && reg <= fpu_xmm15) {
716 value.SetBytes(m_fpr.xstate.fxsave.xmm[reg - fpu_xmm0].bytes, reg_info->byte_size, byte_order);
718 if (reg >= fpu_ymm0 && reg <= fpu_ymm15) {
719 // Concatenate ymm using the register halves in xmm.bytes and ymmh.bytes
720 if (m_fpr_type == eXSAVE && CopyXSTATEtoYMM(reg, byte_order))
721 value.SetBytes(m_ymm_set.ymm[reg - fpu_ymm0].bytes, reg_info->byte_size, byte_order);
725 return value.GetType() == RegisterValue::eTypeBytes;
730 // Note that lldb uses slightly different naming conventions from sys/user.h
// Scalar FPU control/status fields served from the cached fxsave area
// (dp/fcw/fsw/ip/fop/ftw/mxcsr/mxcsrmask).
736 value = m_fpr.xstate.fxsave.dp;
739 value = m_fpr.xstate.fxsave.fcw;
742 value = m_fpr.xstate.fxsave.fsw;
745 value = m_fpr.xstate.fxsave.ip;
748 value = m_fpr.xstate.fxsave.fop;
751 value = m_fpr.xstate.fxsave.ftw;
754 value = m_fpr.xstate.fxsave.mxcsr;
757 value = m_fpr.xstate.fxsave.mxcsrmask;
// Snapshots the full register context (GPRs followed by FPR state) into a
// freshly allocated buffer of REG_CONTEXT_SIZE bytes. In XSAVE mode the ymm
// values are assembled from their halves before the extended state is copied.
764 RegisterContext_x86_64::ReadAllRegisterValues(DataBufferSP &data_sp)
766 bool success = false;
767 data_sp.reset (new DataBufferHeap (REG_CONTEXT_SIZE, 0));
768 if (data_sp && ReadGPR () && ReadFPR ())
770 uint8_t *dst = data_sp->GetBytes();
774 ::memcpy (dst, &m_gpr, GetGPRSize());
777 if (m_fpr_type == eFXSAVE)
778 ::memcpy (dst, &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave));
780 if (m_fpr_type == eXSAVE) {
781 ByteOrder byte_order = GetByteOrder();
783 // Assemble the YMM register content from the register halves.
784 for (uint32_t reg = fpu_ymm0; success && reg <= fpu_ymm15; ++reg)
785 success = CopyXSTATEtoYMM(reg, byte_order);
788 // Copy the extended register state including the assembled ymm registers.
789 ::memcpy (dst, &m_fpr, sizeof(m_fpr));
// Writes 'value' into the register described by 'reg_info'. GPRs delegate to
// the by-number overload; vector registers are memcpy'd into the cached FPR
// state (ymm writes additionally split the value back into xmm/ymmh halves),
// and scalar FPU fields are assigned from the value's integer view.
797 RegisterContext_x86_64::WriteRegister(const lldb_private::RegisterInfo *reg_info,
798 const lldb_private::RegisterValue &value)
800 const uint32_t reg = reg_info->kinds[eRegisterKindLLDB];
802 return WriteRegister(reg, value);
805 if (IsFPR(reg, m_fpr_type)) {
809 if (reg_info->encoding != eEncodingVector)
812 if (reg >= fpu_stmm0 && reg <= fpu_stmm7)
813 ::memcpy (m_fpr.xstate.fxsave.stmm[reg - fpu_stmm0].bytes, value.GetBytes(), value.GetByteSize());
815 if (reg >= fpu_xmm0 && reg <= fpu_xmm15)
816 ::memcpy (m_fpr.xstate.fxsave.xmm[reg - fpu_xmm0].bytes, value.GetBytes(), value.GetByteSize());
818 if (reg >= fpu_ymm0 && reg <= fpu_ymm15) {
819 if (m_fpr_type != eXSAVE)
820 return false; // the target processor does not support AVX
822 // Store ymm register content, and split into the register halves in xmm.bytes and ymmh.bytes
823 ::memcpy (m_ymm_set.ymm[reg - fpu_ymm0].bytes, value.GetBytes(), value.GetByteSize());
824 if (false == CopyYMMtoXSTATE(reg, GetByteOrder()))
// Scalar FPU control/status fields written into the cached fxsave area.
829 m_fpr.xstate.fxsave.dp = value.GetAsUInt64();
832 m_fpr.xstate.fxsave.fcw = value.GetAsUInt16();
835 m_fpr.xstate.fxsave.fsw = value.GetAsUInt16();
838 m_fpr.xstate.fxsave.ip = value.GetAsUInt64();
841 m_fpr.xstate.fxsave.fop = value.GetAsUInt16();
844 m_fpr.xstate.fxsave.ftw = value.GetAsUInt16();
847 m_fpr.xstate.fxsave.mxcsr = value.GetAsUInt32();
850 m_fpr.xstate.fxsave.mxcsrmask = value.GetAsUInt32();
855 return CopyYMMtoXSTATE(reg, GetByteOrder());
// Restores the full register context from a buffer previously produced by
// ReadAllRegisterValues (size must equal REG_CONTEXT_SIZE). In XSAVE mode the
// ymm values from the buffer are split back into their xmm/ymmh halves.
863 RegisterContext_x86_64::WriteAllRegisterValues(const DataBufferSP &data_sp)
865 bool success = false;
866 if (data_sp && data_sp->GetByteSize() == REG_CONTEXT_SIZE)
868 uint8_t *src = data_sp->GetBytes();
870 ::memcpy (&m_gpr, src, GetGPRSize());
874 if (m_fpr_type == eFXSAVE)
875 ::memcpy (&m_fpr.xstate.fxsave, src, sizeof(m_fpr.xstate.fxsave));
876 if (m_fpr_type == eXSAVE)
877 ::memcpy (&m_fpr.xstate.xsave, src, sizeof(m_fpr.xstate.xsave));
879 success = WriteFPR();
883 if (m_fpr_type == eXSAVE) {
884 ByteOrder byte_order = GetByteOrder();
886 // Parse the YMM register content from the register halves.
887 for (uint32_t reg = fpu_ymm0; success && reg <= fpu_ymm15; ++reg)
888 success = CopyYMMtoXSTATE(reg, byte_order);
// After an int3 breakpoint fires, rewinds/validates the PC (which the CPU
// left one byte past the trap instruction).
898 RegisterContext_x86_64::UpdateAfterBreakpoint()
900 // PC points one byte past the int3 responsible for the breakpoint.
903 if ((pc = GetPC()) == LLDB_INVALID_ADDRESS)
// Maps a register number expressed in an external numbering scheme (generic,
// GCC/DWARF, GDB, or LLDB) onto this context's internal register enums,
// dispatching on the target's architecture core. Unknown numbers yield
// LLDB_INVALID_REGNUM.
911 RegisterContext_x86_64::ConvertRegisterKindToRegisterNumber(uint32_t kind,
914 const Process *process = CalculateProcess().get();
// NOTE(review): the trailing ';;' below is a harmless empty statement.
917 const ArchSpec arch = process->GetTarget().GetArchitecture();;
918 switch (arch.GetCore())
921 assert(false && "CPU type not supported!");
// ---- 32-bit (i386-family) cores -----------------------------------------
924 case ArchSpec::eCore_x86_32_i386:
925 case ArchSpec::eCore_x86_32_i486:
926 case ArchSpec::eCore_x86_32_i486sx:
928 if (kind == eRegisterKindGeneric)
932 case LLDB_REGNUM_GENERIC_PC: return gpr_eip;
933 case LLDB_REGNUM_GENERIC_SP: return gpr_esp;
934 case LLDB_REGNUM_GENERIC_FP: return gpr_ebp;
935 case LLDB_REGNUM_GENERIC_FLAGS: return gpr_eflags;
936 case LLDB_REGNUM_GENERIC_RA:
938 return LLDB_INVALID_REGNUM;
// GCC/DWARF numbering for i386.
942 if (kind == eRegisterKindGCC || kind == eRegisterKindDWARF)
946 case dwarf_eax: return gpr_eax;
947 case dwarf_edx: return gpr_edx;
948 case dwarf_ecx: return gpr_ecx;
949 case dwarf_ebx: return gpr_ebx;
950 case dwarf_esi: return gpr_esi;
951 case dwarf_edi: return gpr_edi;
952 case dwarf_ebp: return gpr_ebp;
953 case dwarf_esp: return gpr_esp;
954 case dwarf_eip: return gpr_eip;
955 case dwarf_xmm0: return fpu_xmm0;
956 case dwarf_xmm1: return fpu_xmm1;
957 case dwarf_xmm2: return fpu_xmm2;
958 case dwarf_xmm3: return fpu_xmm3;
959 case dwarf_xmm4: return fpu_xmm4;
960 case dwarf_xmm5: return fpu_xmm5;
961 case dwarf_xmm6: return fpu_xmm6;
962 case dwarf_xmm7: return fpu_xmm7;
963 case dwarf_stmm0: return fpu_stmm0;
964 case dwarf_stmm1: return fpu_stmm1;
965 case dwarf_stmm2: return fpu_stmm2;
966 case dwarf_stmm3: return fpu_stmm3;
967 case dwarf_stmm4: return fpu_stmm4;
968 case dwarf_stmm5: return fpu_stmm5;
969 case dwarf_stmm6: return fpu_stmm6;
970 case dwarf_stmm7: return fpu_stmm7;
972 return LLDB_INVALID_REGNUM;
// GDB numbering for i386.
976 if (kind == eRegisterKindGDB)
980 case gdb_eax : return gpr_eax;
981 case gdb_ebx : return gpr_ebx;
982 case gdb_ecx : return gpr_ecx;
983 case gdb_edx : return gpr_edx;
984 case gdb_esi : return gpr_esi;
985 case gdb_edi : return gpr_edi;
986 case gdb_ebp : return gpr_ebp;
987 case gdb_esp : return gpr_esp;
988 case gdb_eip : return gpr_eip;
989 case gdb_eflags : return gpr_eflags;
990 case gdb_cs : return gpr_cs;
991 case gdb_ss : return gpr_ss;
992 case gdb_ds : return gpr_ds;
993 case gdb_es : return gpr_es;
994 case gdb_fs : return gpr_fs;
995 case gdb_gs : return gpr_gs;
996 case gdb_stmm0 : return fpu_stmm0;
997 case gdb_stmm1 : return fpu_stmm1;
998 case gdb_stmm2 : return fpu_stmm2;
999 case gdb_stmm3 : return fpu_stmm3;
1000 case gdb_stmm4 : return fpu_stmm4;
1001 case gdb_stmm5 : return fpu_stmm5;
1002 case gdb_stmm6 : return fpu_stmm6;
1003 case gdb_stmm7 : return fpu_stmm7;
1004 case gdb_fcw : return fpu_fcw;
1005 case gdb_fsw : return fpu_fsw;
1006 case gdb_ftw : return fpu_ftw;
1007 case gdb_fpu_cs : return fpu_cs;
1008 case gdb_ip : return fpu_ip;
1009 case gdb_fpu_ds : return fpu_ds; //fpu_fos
1010 case gdb_dp : return fpu_dp; //fpu_foo
1011 case gdb_fop : return fpu_fop;
1012 case gdb_xmm0 : return fpu_xmm0;
1013 case gdb_xmm1 : return fpu_xmm1;
1014 case gdb_xmm2 : return fpu_xmm2;
1015 case gdb_xmm3 : return fpu_xmm3;
1016 case gdb_xmm4 : return fpu_xmm4;
1017 case gdb_xmm5 : return fpu_xmm5;
1018 case gdb_xmm6 : return fpu_xmm6;
1019 case gdb_xmm7 : return fpu_xmm7;
1020 case gdb_mxcsr : return fpu_mxcsr;
1022 return LLDB_INVALID_REGNUM;
1025 else if (kind == eRegisterKindLLDB)
// ---- 64-bit core ---------------------------------------------------------
1033 case ArchSpec::eCore_x86_64_x86_64:
1035 if (kind == eRegisterKindGeneric)
1039 case LLDB_REGNUM_GENERIC_PC: return gpr_rip;
1040 case LLDB_REGNUM_GENERIC_SP: return gpr_rsp;
1041 case LLDB_REGNUM_GENERIC_FP: return gpr_rbp;
1042 case LLDB_REGNUM_GENERIC_FLAGS: return gpr_rflags;
1043 case LLDB_REGNUM_GENERIC_RA:
1045 return LLDB_INVALID_REGNUM;
// GCC/DWARF numbering for x86_64.
1049 if (kind == eRegisterKindGCC || kind == eRegisterKindDWARF)
1053 case gcc_dwarf_gpr_rax: return gpr_rax;
1054 case gcc_dwarf_gpr_rdx: return gpr_rdx;
1055 case gcc_dwarf_gpr_rcx: return gpr_rcx;
1056 case gcc_dwarf_gpr_rbx: return gpr_rbx;
1057 case gcc_dwarf_gpr_rsi: return gpr_rsi;
1058 case gcc_dwarf_gpr_rdi: return gpr_rdi;
1059 case gcc_dwarf_gpr_rbp: return gpr_rbp;
1060 case gcc_dwarf_gpr_rsp: return gpr_rsp;
1061 case gcc_dwarf_gpr_r8: return gpr_r8;
1062 case gcc_dwarf_gpr_r9: return gpr_r9;
1063 case gcc_dwarf_gpr_r10: return gpr_r10;
1064 case gcc_dwarf_gpr_r11: return gpr_r11;
1065 case gcc_dwarf_gpr_r12: return gpr_r12;
1066 case gcc_dwarf_gpr_r13: return gpr_r13;
1067 case gcc_dwarf_gpr_r14: return gpr_r14;
1068 case gcc_dwarf_gpr_r15: return gpr_r15;
1069 case gcc_dwarf_gpr_rip: return gpr_rip;
1070 case gcc_dwarf_fpu_xmm0: return fpu_xmm0;
1071 case gcc_dwarf_fpu_xmm1: return fpu_xmm1;
1072 case gcc_dwarf_fpu_xmm2: return fpu_xmm2;
1073 case gcc_dwarf_fpu_xmm3: return fpu_xmm3;
1074 case gcc_dwarf_fpu_xmm4: return fpu_xmm4;
1075 case gcc_dwarf_fpu_xmm5: return fpu_xmm5;
1076 case gcc_dwarf_fpu_xmm6: return fpu_xmm6;
1077 case gcc_dwarf_fpu_xmm7: return fpu_xmm7;
1078 case gcc_dwarf_fpu_xmm8: return fpu_xmm8;
1079 case gcc_dwarf_fpu_xmm9: return fpu_xmm9;
1080 case gcc_dwarf_fpu_xmm10: return fpu_xmm10;
1081 case gcc_dwarf_fpu_xmm11: return fpu_xmm11;
1082 case gcc_dwarf_fpu_xmm12: return fpu_xmm12;
1083 case gcc_dwarf_fpu_xmm13: return fpu_xmm13;
1084 case gcc_dwarf_fpu_xmm14: return fpu_xmm14;
1085 case gcc_dwarf_fpu_xmm15: return fpu_xmm15;
1086 case gcc_dwarf_fpu_stmm0: return fpu_stmm0;
1087 case gcc_dwarf_fpu_stmm1: return fpu_stmm1;
1088 case gcc_dwarf_fpu_stmm2: return fpu_stmm2;
1089 case gcc_dwarf_fpu_stmm3: return fpu_stmm3;
1090 case gcc_dwarf_fpu_stmm4: return fpu_stmm4;
1091 case gcc_dwarf_fpu_stmm5: return fpu_stmm5;
1092 case gcc_dwarf_fpu_stmm6: return fpu_stmm6;
1093 case gcc_dwarf_fpu_stmm7: return fpu_stmm7;
1094 case gcc_dwarf_fpu_ymm0: return fpu_ymm0;
1095 case gcc_dwarf_fpu_ymm1: return fpu_ymm1;
1096 case gcc_dwarf_fpu_ymm2: return fpu_ymm2;
1097 case gcc_dwarf_fpu_ymm3: return fpu_ymm3;
1098 case gcc_dwarf_fpu_ymm4: return fpu_ymm4;
1099 case gcc_dwarf_fpu_ymm5: return fpu_ymm5;
1100 case gcc_dwarf_fpu_ymm6: return fpu_ymm6;
1101 case gcc_dwarf_fpu_ymm7: return fpu_ymm7;
1102 case gcc_dwarf_fpu_ymm8: return fpu_ymm8;
1103 case gcc_dwarf_fpu_ymm9: return fpu_ymm9;
1104 case gcc_dwarf_fpu_ymm10: return fpu_ymm10;
1105 case gcc_dwarf_fpu_ymm11: return fpu_ymm11;
1106 case gcc_dwarf_fpu_ymm12: return fpu_ymm12;
1107 case gcc_dwarf_fpu_ymm13: return fpu_ymm13;
1108 case gcc_dwarf_fpu_ymm14: return fpu_ymm14;
1109 case gcc_dwarf_fpu_ymm15: return fpu_ymm15;
1111 return LLDB_INVALID_REGNUM;
// GDB numbering for x86_64.
1115 if (kind == eRegisterKindGDB)
1119 case gdb_gpr_rax : return gpr_rax;
1120 case gdb_gpr_rbx : return gpr_rbx;
1121 case gdb_gpr_rcx : return gpr_rcx;
1122 case gdb_gpr_rdx : return gpr_rdx;
1123 case gdb_gpr_rsi : return gpr_rsi;
1124 case gdb_gpr_rdi : return gpr_rdi;
1125 case gdb_gpr_rbp : return gpr_rbp;
1126 case gdb_gpr_rsp : return gpr_rsp;
1127 case gdb_gpr_r8 : return gpr_r8;
1128 case gdb_gpr_r9 : return gpr_r9;
1129 case gdb_gpr_r10 : return gpr_r10;
1130 case gdb_gpr_r11 : return gpr_r11;
1131 case gdb_gpr_r12 : return gpr_r12;
1132 case gdb_gpr_r13 : return gpr_r13;
1133 case gdb_gpr_r14 : return gpr_r14;
1134 case gdb_gpr_r15 : return gpr_r15;
1135 case gdb_gpr_rip : return gpr_rip;
1136 case gdb_gpr_rflags : return gpr_rflags;
1137 case gdb_gpr_cs : return gpr_cs;
1138 case gdb_gpr_ss : return gpr_ss;
1139 case gdb_gpr_ds : return gpr_ds;
1140 case gdb_gpr_es : return gpr_es;
1141 case gdb_gpr_fs : return gpr_fs;
1142 case gdb_gpr_gs : return gpr_gs;
1143 case gdb_fpu_stmm0 : return fpu_stmm0;
1144 case gdb_fpu_stmm1 : return fpu_stmm1;
1145 case gdb_fpu_stmm2 : return fpu_stmm2;
1146 case gdb_fpu_stmm3 : return fpu_stmm3;
1147 case gdb_fpu_stmm4 : return fpu_stmm4;
1148 case gdb_fpu_stmm5 : return fpu_stmm5;
1149 case gdb_fpu_stmm6 : return fpu_stmm6;
1150 case gdb_fpu_stmm7 : return fpu_stmm7;
1151 case gdb_fpu_fcw : return fpu_fcw;
1152 case gdb_fpu_fsw : return fpu_fsw;
1153 case gdb_fpu_ftw : return fpu_ftw;
1154 case gdb_fpu_cs_64 : return fpu_cs;
1155 case gdb_fpu_ip : return fpu_ip;
1156 case gdb_fpu_ds_64 : return fpu_ds;
1157 case gdb_fpu_dp : return fpu_dp;
1158 case gdb_fpu_fop : return fpu_fop;
1159 case gdb_fpu_xmm0 : return fpu_xmm0;
1160 case gdb_fpu_xmm1 : return fpu_xmm1;
1161 case gdb_fpu_xmm2 : return fpu_xmm2;
1162 case gdb_fpu_xmm3 : return fpu_xmm3;
1163 case gdb_fpu_xmm4 : return fpu_xmm4;
1164 case gdb_fpu_xmm5 : return fpu_xmm5;
1165 case gdb_fpu_xmm6 : return fpu_xmm6;
1166 case gdb_fpu_xmm7 : return fpu_xmm7;
1167 case gdb_fpu_xmm8 : return fpu_xmm8;
1168 case gdb_fpu_xmm9 : return fpu_xmm9;
1169 case gdb_fpu_xmm10 : return fpu_xmm10;
1170 case gdb_fpu_xmm11 : return fpu_xmm11;
1171 case gdb_fpu_xmm12 : return fpu_xmm12;
1172 case gdb_fpu_xmm13 : return fpu_xmm13;
1173 case gdb_fpu_xmm14 : return fpu_xmm14;
1174 case gdb_fpu_xmm15 : return fpu_xmm15;
1175 case gdb_fpu_mxcsr : return fpu_mxcsr;
1176 case gdb_fpu_ymm0 : return fpu_ymm0;
1177 case gdb_fpu_ymm1 : return fpu_ymm1;
1178 case gdb_fpu_ymm2 : return fpu_ymm2;
1179 case gdb_fpu_ymm3 : return fpu_ymm3;
1180 case gdb_fpu_ymm4 : return fpu_ymm4;
1181 case gdb_fpu_ymm5 : return fpu_ymm5;
1182 case gdb_fpu_ymm6 : return fpu_ymm6;
1183 case gdb_fpu_ymm7 : return fpu_ymm7;
1184 case gdb_fpu_ymm8 : return fpu_ymm8;
1185 case gdb_fpu_ymm9 : return fpu_ymm9;
1186 case gdb_fpu_ymm10 : return fpu_ymm10;
1187 case gdb_fpu_ymm11 : return fpu_ymm11;
1188 case gdb_fpu_ymm12 : return fpu_ymm12;
1189 case gdb_fpu_ymm13 : return fpu_ymm13;
1190 case gdb_fpu_ymm14 : return fpu_ymm14;
1191 case gdb_fpu_ymm15 : return fpu_ymm15;
1193 return LLDB_INVALID_REGNUM;
1196 else if (kind == eRegisterKindLLDB)
1204 return LLDB_INVALID_REGNUM;
// x86 provides four debug address registers (dr0-dr3), hence four
// hardware watchpoints.
1208 RegisterContext_x86_64::NumSupportedHardwareWatchpoints()
1210 // Available debug address registers: dr0, dr1, dr2, dr3
// A watchpoint slot is vacant when neither of its enable bits (L/G pair for
// this index) is set in dr7. Lazily zeroes dr6/dr7 on first use.
1215 RegisterContext_x86_64::IsWatchpointVacant(uint32_t hw_index)
1217 bool is_vacant = false;
1218 RegisterValue value;
1220 assert(hw_index < NumSupportedHardwareWatchpoints());
1222 if (m_watchpoints_initialized == false)
1224 // Reset the debug status and debug control registers
1225 RegisterValue zero_bits = RegisterValue(uint64_t(0));
1226 if (!WriteRegister(dr6, zero_bits) || !WriteRegister(dr7, zero_bits))
1227 assert(false && "Could not initialize watchpoint registers");
1228 m_watchpoints_initialized = true;
1231 if (ReadRegister(dr7, value))
1233 uint64_t val = value.GetAsUInt64();
// Bits {2*hw_index, 2*hw_index+1} are this slot's local/global enables.
1234 is_vacant = (val & (3 << 2*hw_index)) == 0;
// Encodes a watchpoint's 4-bit dr7 rw/len nibble from its byte size and
// read/write flags (len in bits 3-2, rw in bits 1-0).
1241 size_and_rw_bits(size_t size, bool read, bool write)
1245 rw = 0x3; // READ or READ/WRITE
1249 assert(0 && "read and write cannot both be false");
// len encoding: 0b01 = 2 bytes, 0b11 = 4 bytes, 0b10 = 8 bytes.
1256 return (0x1 << 2) | rw;
1258 return (0x3 << 2) | rw;
1260 return (0x2 << 2) | rw;
1262 assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
// Installs a watchpoint in the first vacant debug-register slot; returns
// LLDB_INVALID_INDEX32 when all four slots are occupied.
1267 RegisterContext_x86_64::SetHardwareWatchpoint(addr_t addr, size_t size,
1268 bool read, bool write)
1270 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
1273 for (hw_index = 0; hw_index < num_hw_watchpoints; ++hw_index)
1275 if (IsWatchpointVacant(hw_index))
1276 return SetHardwareWatchpointWithIndex(addr, size,
1281 return LLDB_INVALID_INDEX32;
// Installs a watchpoint in a specific debug-register slot after validating
// the index, size (1/2/4/8) and access mode, then programs dr<hw_index> with
// the address and dr7 with the enable + rw/len bits.
1285 RegisterContext_x86_64::SetHardwareWatchpointWithIndex(addr_t addr, size_t size,
1286 bool read, bool write,
1289 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
1291 if (num_hw_watchpoints == 0 || hw_index >= num_hw_watchpoints)
1294 if (!(size == 1 || size == 2 || size == 4 || size == 8))
1297 if (read == false && write == false)
1300 if (!IsWatchpointVacant(hw_index))
1303 // Set both dr7 (debug control register) and dri (debug address register).
1305 // dr7{7-0} encodes the local/global enable bits:
1306 // global enable --. .-- local enable
1314 // dr7{31-16} encodes the rw/len bits:
1315 // b_x+3, b_x+2, b_x+1, b_x
1316 // where bits{x+1, x} => rw
1317 // 0b00: execute, 0b01: write, 0b11: read-or-write,
1318 // 0b10: io read-or-write (unused)
1319 // and bits{x+3, x+2} => len
1320 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
1322 // dr0 -> bits{19-16}
1323 // dr1 -> bits{23-20}
1324 // dr2 -> bits{27-24}
1325 // dr3 -> bits{31-28}
1326 if (hw_index < num_hw_watchpoints)
1328 RegisterValue current_dr7_bits;
1330 if (ReadRegister(dr7, current_dr7_bits))
// OR in this slot's local-enable bit plus its rw/len nibble.
1332 uint64_t new_dr7_bits = current_dr7_bits.GetAsUInt64() |
1333 (1 << (2*hw_index) |
1334 size_and_rw_bits(size, read, write) <<
1337 if (WriteRegister(dr0 + hw_index, RegisterValue(addr)) &&
1338 WriteRegister(dr7, RegisterValue(new_dr7_bits)))
1347 RegisterContext_x86_64::ClearHardwareWatchpoint(uint32_t hw_index)
1349 if (hw_index < NumSupportedHardwareWatchpoints())
1351 RegisterValue current_dr7_bits;
1353 if (ReadRegister(dr7, current_dr7_bits))
1355 uint64_t new_dr7_bits = current_dr7_bits.GetAsUInt64() & ~(3 << (2*hw_index));
1357 if (WriteRegister(dr7, RegisterValue(new_dr7_bits)))
1366 RegisterContext_x86_64::IsWatchpointHit(uint32_t hw_index)
1368 bool is_hit = false;
1370 if (m_watchpoints_initialized == false)
1372 // Reset the debug status and debug control registers
1373 RegisterValue zero_bits = RegisterValue(uint64_t(0));
1374 if (!WriteRegister(dr6, zero_bits) || !WriteRegister(dr7, zero_bits))
1375 assert(false && "Could not initialize watchpoint registers");
1376 m_watchpoints_initialized = true;
1379 if (hw_index < NumSupportedHardwareWatchpoints())
1381 RegisterValue value;
1383 if (ReadRegister(dr6, value))
1385 uint64_t val = value.GetAsUInt64();
1386 is_hit = val & (1 << hw_index);
1394 RegisterContext_x86_64::GetWatchpointAddress(uint32_t hw_index)
1396 addr_t wp_monitor_addr = LLDB_INVALID_ADDRESS;
1398 if (hw_index < NumSupportedHardwareWatchpoints())
1400 if (!IsWatchpointVacant(hw_index))
1402 RegisterValue value;
1404 if (ReadRegister(dr0 + hw_index, value))
1405 wp_monitor_addr = value.GetAsUInt64();
1409 return wp_monitor_addr;
1414 RegisterContext_x86_64::ClearWatchpointHits()
1416 return WriteRegister(dr6, RegisterValue((uint64_t)0));
1420 RegisterContext_x86_64::HardwareSingleStep(bool enable)
1422 enum { TRACE_BIT = 0x100 };
1425 if ((rflags = ReadRegisterAsUnsigned(gpr_rflags, -1UL)) == -1UL)
1430 if (rflags & TRACE_BIT)
1433 rflags |= TRACE_BIT;
1437 if (!(rflags & TRACE_BIT))
1440 rflags &= ~TRACE_BIT;
1443 return WriteRegisterFromUnsigned(gpr_rflags, rflags);
1446 #if defined(__linux__) or defined(__FreeBSD__)
1449 RegisterContext_x86_64::GetMonitor()
1451 ProcessSP base = CalculateProcess();
1452 ProcessPOSIX *process = static_cast<ProcessPOSIX*>(base.get());
1453 return process->GetMonitor();
1457 RegisterContext_x86_64::ReadGPR()
1459 ProcessMonitor &monitor = GetMonitor();
1460 return monitor.ReadGPR(m_thread.GetID(), &m_gpr, GetGPRSize());
1464 RegisterContext_x86_64::ReadFPR()
1466 ProcessMonitor &monitor = GetMonitor();
1467 if (m_fpr_type == eFXSAVE)
1468 return monitor.ReadFPR(m_thread.GetID(), &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave));
1470 if (m_fpr_type == eXSAVE)
1471 return monitor.ReadRegisterSet(m_thread.GetID(), &m_iovec, sizeof(m_fpr.xstate.xsave), NT_X86_XSTATE);
1476 RegisterContext_x86_64::WriteGPR()
1478 ProcessMonitor &monitor = GetMonitor();
1479 return monitor.WriteGPR(m_thread.GetID(), &m_gpr, GetGPRSize());
1483 RegisterContext_x86_64::WriteFPR()
1485 ProcessMonitor &monitor = GetMonitor();
1486 if (m_fpr_type == eFXSAVE)
1487 return monitor.WriteFPR(m_thread.GetID(), &m_fpr.xstate.fxsave, sizeof(m_fpr.xstate.fxsave));
1489 if (m_fpr_type == eXSAVE)
1490 return monitor.WriteRegisterSet(m_thread.GetID(), &m_iovec, sizeof(m_fpr.xstate.xsave), NT_X86_XSTATE);
1495 RegisterContext_x86_64::ReadRegister(const unsigned reg,
1496 RegisterValue &value)
1498 ProcessMonitor &monitor = GetMonitor();
1499 return monitor.ReadRegisterValue(m_thread.GetID(),
1500 GetRegisterOffset(reg),
1501 GetRegisterName(reg),
1502 GetRegisterSize(reg),
1507 RegisterContext_x86_64::WriteRegister(const unsigned reg,
1508 const RegisterValue &value)
1510 ProcessMonitor &monitor = GetMonitor();
1511 return monitor.WriteRegisterValue(m_thread.GetID(),
1512 GetRegisterOffset(reg),
1513 GetRegisterName(reg),
1520 RegisterContext_x86_64::ReadGPR()
1522 llvm_unreachable("not implemented");
1527 RegisterContext_x86_64::ReadFPR()
1529 llvm_unreachable("not implemented");
1534 RegisterContext_x86_64::WriteGPR()
1536 llvm_unreachable("not implemented");
1541 RegisterContext_x86_64::WriteFPR()
1543 llvm_unreachable("not implemented");
1548 RegisterContext_x86_64::ReadRegister(const unsigned reg,
1549 RegisterValue &value)
1551 llvm_unreachable("not implemented");
1556 RegisterContext_x86_64::WriteRegister(const unsigned reg,
1557 const RegisterValue &value)
1559 llvm_unreachable("not implemented");