//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
# On Windows, the 'this' pointer is passed in ecx instead of on the stack.
# On entry:
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+ <-- SP
  # set up eax and ret on new stack location
  movl 28(%eax), %edx # edx holds new stack pointer

  # the return address and eax have now been stored just below the new stack location
  # restore all registers

  pop %eax # eax was already pushed on new stack
  ret      # eip was already pushed on new stack
#elif defined(__x86_64__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
#if defined(_WIN64)
  # On entry, the thread_state pointer is in rcx; move it into rdi
  # to share the restore code below. Since this routine restores and
  # overwrites all registers, we can use the same registers for
  # pointers and temporaries as on Unix, even though Win64 normally
  # mustn't clobber some of them.
  movq %rcx, %rdi
#else
  # On entry, the thread_state pointer is in rdi
#endif
  movq 56(%rdi), %rax  # rax holds new stack pointer
  movq 32(%rdi), %rbx  # store new rdi on new stack
  movq 128(%rdi), %rbx # store new rip on new stack
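  # The rdi and rip values read above are parked just below the new stack
  # pointer, so that once %rsp has been switched they can be recovered by
  # the pop/ret sequence at the end of this routine.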
  # restore all registers
  movdqu 176(%rdi), %xmm0
  movdqu 192(%rdi), %xmm1
  movdqu 208(%rdi), %xmm2
  movdqu 224(%rdi), %xmm3
  movdqu 240(%rdi), %xmm4
  movdqu 256(%rdi), %xmm5
  movdqu 272(%rdi), %xmm6
  movdqu 288(%rdi), %xmm7
  movdqu 304(%rdi), %xmm8
  movdqu 320(%rdi), %xmm9
  movdqu 336(%rdi), %xmm10
  movdqu 352(%rdi), %xmm11
  movdqu 368(%rdi), %xmm12
  movdqu 384(%rdi), %xmm13
  movdqu 400(%rdi), %xmm14
  movdqu 416(%rdi), %xmm15
  movq 56(%rdi), %rsp # cut back rsp to new location
  pop %rdi            # rdi was saved here earlier
  ret                 # rip was saved here
#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
// void libunwind::Registers_ppc64::jumpto()
// thread_state pointer is in r3
// load register (GPR)
#define PPC64_LR(n) \
  ld %r##n, (8 * (n + 2))(%r3)
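// For example, PPC64_LR(4) expands to
//   ld %r4, 48(%r3)
// i.e. GPR n is saved at offset 8 * (n + 2) from the state pointer in r3.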
// restore integral registers
// restore VS registers
// (note that this also restores floating point registers and V registers,
// because part of VS is mapped to these registers)

  addi %r4, %r3, PPC64_OFFS_FP

// load VS register, then advance r4 to the next saved slot
#define PPC64_LVS(n)      \
  lxvd2x %vs##n, 0, %r4  ;\
  addi %r4, %r4, 16
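// For example, PPC64_LVS(0) expands to
//   lxvd2x %vs0, 0, %r4
//   addi   %r4, %r4, 16
// loading one 16-byte VSX register from the cursor in r4 and advancing the
// cursor to the next saved slot.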
// restore the first 32 VS regs (and also all floating point regs)

// use VRSAVE to conditionally restore the remaining VS regs, which are
// where the V regs are mapped

  ld %r5, PPC64_OFFS_VRSAVE(%r3) // test VRsave
// conditionally load VS
#define PPC64_CLVS_BOTTOM(n)               \
  beq Ldone##n                            ;\
  addi %r4, %r3, PPC64_OFFS_FP + n * 16   ;\
  lxvd2x %vs##n, 0, %r4                   ;\
Ldone##n:

#define PPC64_CLVSl(n)            \
  andis. %r0, %r5, (1<<(47-n))   ;\
  PPC64_CLVS_BOTTOM(n)

#define PPC64_CLVSh(n)            \
  andi. %r0, %r5, (1<<(63-n))    ;\
  PPC64_CLVS_BOTTOM(n)
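// For example, PPC64_CLVSl(32) expands to
//   andis. %r0, %r5, 0x8000   // VRSAVE bit for V0, which maps onto VS32
//   beq    Ldone32
//   addi   %r4, %r3, PPC64_OFFS_FP + 32 * 16
//   lxvd2x %vs32, 0, %r4
// Ldone32:
// The l/h variants exist because andis./andi. each test only one 16-bit
// half of the 32-bit VRSAVE mask.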
#define PPC64_LF(n) \
  lfd %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
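// For example, PPC64_LF(2) expands to
//   lfd %f2, (PPC64_OFFS_FP + 32)(%r3)
// each FP register occupying the first 8 bytes of its 16-byte VS slot.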
// restore float registers
// restore vector registers if any are in use
  ld %r5, PPC64_OFFS_VRSAVE(%r3) // test VRsave

// r4 is now a 16-byte aligned pointer into the red zone
// the _vectorScalarRegisters may not be 16-byte aligned
// so copy via red zone temp buffer
#define PPC64_CLV_UNALIGNED_BOTTOM(n)          \
  beq Ldone##n                                ;\
  ld %r0, (PPC64_OFFS_V + n * 16)(%r3)        ;\
  std %r0, 0(%r4)                             ;\
  ld %r0, (PPC64_OFFS_V + n * 16 + 8)(%r3)    ;\
  std %r0, 8(%r4)                             ;\
  lvx %v##n, 0, %r4                           ;\
Ldone##n:

#define PPC64_CLV_UNALIGNEDl(n)     \
  andis. %r0, %r5, (1<<(15-n))     ;\
  PPC64_CLV_UNALIGNED_BOTTOM(n)

#define PPC64_CLV_UNALIGNEDh(n)     \
  andi. %r0, %r5, (1<<(31-n))      ;\
  PPC64_CLV_UNALIGNED_BOTTOM(n)
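// For example, PPC64_CLV_UNALIGNEDl(3) tests V3's VRSAVE bit with
//   andis. %r0, %r5, 0x1000
// and, if it is set, bounces the two doublewords of the saved V3 through
// the 16-byte aligned red-zone buffer in r4, because lvx ignores the low
// four address bits and therefore needs an aligned source.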
  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)
  ld %r0, PPC64_OFFS_CR(%r3)
  mtcr %r0
  ld %r0, PPC64_OFFS_SRR0(%r3)
  mtctr %r0 // SRR0 is the resume address; it is branched to via CTR
#elif defined(__ppc__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
; void libunwind::Registers_ppc::jumpto()
; thread_state pointer is in r3

; restore integral registers

; restore float registers

; restore vector registers if any are in use
  lwz r5, 156(r3) ; test VRsave
  rlwinm r4, r4, 0, 0, 27 ; clear the low 4 bits so r4 is 16-byte aligned
; r4 is now a 16-byte aligned pointer into the red zone
; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis. r0,r5,(1<<(15-_index)) @\
  beq Ldone ## _index @\
  lwz r0, 424+_index*16(r3) @\
  stw r0, 0(r4) @\
  lwz r0, 424+_index*16+4(r3) @\
  stw r0, 4(r4) @\
  lwz r0, 424+_index*16+8(r3) @\
  stw r0, 8(r4) @\
  lwz r0, 424+_index*16+12(r3) @\
  stw r0, 12(r4) @\
  lvx v ## _index,0,r4 @\
Ldone ## _index:
#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi. r0,r5,(1<<(31-_index)) @\
  beq Ldone ## _index @\
  lwz r0, 424+_index*16(r3) @\
  stw r0, 0(r4) @\
  lwz r0, 424+_index*16+4(r3) @\
  stw r0, 4(r4) @\
  lwz r0, 424+_index*16+8(r3) @\
  stw r0, 8(r4) @\
  lwz r0, 424+_index*16+12(r3) @\
  stw r0, 12(r4) @\
  lvx v ## _index,0,r4 @\
Ldone ## _index:
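; For example, LOAD_VECTOR_UNALIGNEDl(2) tests v2's VRSAVE bit with
;   andis. r0,r5,0x2000
; and, if it is set, copies the four words saved at offset 424+32 through
; the aligned red-zone buffer in r4 before loading v2 with lvx.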
  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
  lwz r0, 136(r3)  ; __cr
  mtcrf 255, r0
  lwz r0, 148(r3)  ; __ctr
  mtctr r0
  lwz r0, 0(r3)    ; __srr0 (resume pc)
  mtlr r0
  lwz r0, 8(r3)    ; do r0 now
  lwz r5, 28(r3)   ; do r5 now
  lwz r4, 24(r3)   ; do r4 now
  lwz r1, 12(r3)   ; do sp now
  lwz r3, 20(r3)   ; do r3 last
  blr
#elif defined(__arm64__) || defined(__aarch64__)

// void libunwind::Registers_arm64::jumpto()
// thread_state pointer is in x0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
  // skip restore of x0,x1 for now
  ldp x2, x3, [x0, #0x010]
  ldp x4, x5, [x0, #0x020]
  ldp x6, x7, [x0, #0x030]
  ldp x8, x9, [x0, #0x040]
  ldp x10, x11, [x0, #0x050]
  ldp x12, x13, [x0, #0x060]
  ldp x14, x15, [x0, #0x070]
  ldp x16, x17, [x0, #0x080]
  ldp x18, x19, [x0, #0x090]
  ldp x20, x21, [x0, #0x0A0]
  ldp x22, x23, [x0, #0x0B0]
  ldp x24, x25, [x0, #0x0C0]
  ldp x26, x27, [x0, #0x0D0]
  ldp x28, x29, [x0, #0x0E0]
  ldr x30, [x0, #0x100] // restore pc into lr
  ldr x1, [x0, #0x0F8]
  mov sp, x1 // restore sp
  ldp d0, d1, [x0, #0x110]
  ldp d2, d3, [x0, #0x120]
  ldp d4, d5, [x0, #0x130]
  ldp d6, d7, [x0, #0x140]
  ldp d8, d9, [x0, #0x150]
  ldp d10, d11, [x0, #0x160]
  ldp d12, d13, [x0, #0x170]
  ldp d14, d15, [x0, #0x180]
  ldp d16, d17, [x0, #0x190]
  ldp d18, d19, [x0, #0x1A0]
  ldp d20, d21, [x0, #0x1B0]
  ldp d22, d23, [x0, #0x1C0]
  ldp d24, d25, [x0, #0x1D0]
  ldp d26, d27, [x0, #0x1E0]
  ldp d28, d29, [x0, #0x1F0]
  ldr d30, [x0, #0x200]
  ldr d31, [x0, #0x208]
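  // x0 has been the base register for every load above, so x0 and x1 are
  // deliberately restored last, from the first 16 bytes of the state.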
  ldp x0, x1, [x0, #0x000] // restore x0,x1
  ret x30                  // jump to pc
#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@ thread_state pointer is in r0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  mov lr, r3 @ restore pc into lr
#else
  @ Use lr as base so that r0 can be restored.
  @ 32-bit Thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and the lr (r14) cannot both be in the list in an LDM instruction
  ldr lr, [lr, #60] @ restore pc into lr
#endif
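  @ In both paths the resume pc ends up in lr rather than being loaded
  @ into pc directly, since ldm may not combine pc with the other loads
  @ needed here; the final branch is then taken through lr.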
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPy)
@ VFP and iwMMX instructions are only available when compiling with the flags
@ that enable them. We do not want to build the library that way (because we
@ do not want the compiler to generate instructions that access those
@ registers), but this code is only reached if the personality routine
@ actually needs these registers, which in turn implies they are available
@ on the target, so it is safe to execute.
@ So, generate the instruction using the corresponding coprocessor mnemonic.
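@ For illustration, restoring d0-d15 through the generic coprocessor
@ encoding (the double-precision VFP bank is coprocessor p11) can be
@ written along the lines of:
@   ldcl p11, cr0, [r0], {32}  @ fldmiad d0, [r0]
@ (a sketch of the technique; the exact operands are an assumption)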
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPy)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPy)
#if defined(__ARM_WMMX)

@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPy)
  ldcl p1, cr0, [r0], #8 @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8 @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8 @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8 @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8 @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8 @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8 @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8 @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8 @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8 @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8 @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8 @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8 @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8 @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8 @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8 @ wldrd wR15, [r0], #8
@ static void libunwind::Registers_arm::restoreiWMMXControl(uint32_t* values)
@ values pointer is in r0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4 @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4 @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4
#endif

#elif defined(__or1k__)
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
# void libunwind::Registers_or1k::jumpto()
# thread_state pointer is in r3

# restore integral registers

# restore r3 last, since it is the base pointer for the loads above

# load new pc into ra
#elif defined(__riscv)

// void libunwind::Registers_riscv::jumpto()
// thread_state pointer is in a0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
#ifdef __riscv_float_abi_double
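  // The 32 integer registers occupy the first 32 8-byte slots of the saved
  // state, so the FP area starts at offset 8 * 32 and f<n> lives at
  // 8 * 32 + 8 * n, as the operands below spell out.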
  fld f0, (8 * 32 + 8 * 0)(a0)
  fld f1, (8 * 32 + 8 * 1)(a0)
  fld f2, (8 * 32 + 8 * 2)(a0)
  fld f3, (8 * 32 + 8 * 3)(a0)
  fld f4, (8 * 32 + 8 * 4)(a0)
  fld f5, (8 * 32 + 8 * 5)(a0)
  fld f6, (8 * 32 + 8 * 6)(a0)
  fld f7, (8 * 32 + 8 * 7)(a0)
  fld f8, (8 * 32 + 8 * 8)(a0)
  fld f9, (8 * 32 + 8 * 9)(a0)
  fld f10, (8 * 32 + 8 * 10)(a0)
  fld f11, (8 * 32 + 8 * 11)(a0)
  fld f12, (8 * 32 + 8 * 12)(a0)
  fld f13, (8 * 32 + 8 * 13)(a0)
  fld f14, (8 * 32 + 8 * 14)(a0)
  fld f15, (8 * 32 + 8 * 15)(a0)
  fld f16, (8 * 32 + 8 * 16)(a0)
  fld f17, (8 * 32 + 8 * 17)(a0)
  fld f18, (8 * 32 + 8 * 18)(a0)
  fld f19, (8 * 32 + 8 * 19)(a0)
  fld f20, (8 * 32 + 8 * 20)(a0)
  fld f21, (8 * 32 + 8 * 21)(a0)
  fld f22, (8 * 32 + 8 * 22)(a0)
  fld f23, (8 * 32 + 8 * 23)(a0)
  fld f24, (8 * 32 + 8 * 24)(a0)
  fld f25, (8 * 32 + 8 * 25)(a0)
  fld f26, (8 * 32 + 8 * 26)(a0)
  fld f27, (8 * 32 + 8 * 27)(a0)
  fld f28, (8 * 32 + 8 * 28)(a0)
  fld f29, (8 * 32 + 8 * 29)(a0)
  fld f30, (8 * 32 + 8 * 30)(a0)
  fld f31, (8 * 32 + 8 * 31)(a0)
#endif

  ld x10, (8 * 10)(a0) // restore a0 last; it is the base for the loads above
  ret                  // jump to ra
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

// void libunwind::Registers_mips_o32::jumpto()
// thread state pointer is in a0 ($4)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
#ifdef __mips_hard_float
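  // The FP save area begins 36 words into the state (offset 4 * 36), after
  // the 32 4-byte GPR slots and a handful of special-register slots.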
#if __mips_fpr != 64
  // 32-bit FPRs: doubles occupy even/odd pairs, so load only the even ones
  ldc1 $f0, (4 * 36 + 8 * 0)($4)
  ldc1 $f2, (4 * 36 + 8 * 2)($4)
  ldc1 $f4, (4 * 36 + 8 * 4)($4)
  ldc1 $f6, (4 * 36 + 8 * 6)($4)
  ldc1 $f8, (4 * 36 + 8 * 8)($4)
  ldc1 $f10, (4 * 36 + 8 * 10)($4)
  ldc1 $f12, (4 * 36 + 8 * 12)($4)
  ldc1 $f14, (4 * 36 + 8 * 14)($4)
  ldc1 $f16, (4 * 36 + 8 * 16)($4)
  ldc1 $f18, (4 * 36 + 8 * 18)($4)
  ldc1 $f20, (4 * 36 + 8 * 20)($4)
  ldc1 $f22, (4 * 36 + 8 * 22)($4)
  ldc1 $f24, (4 * 36 + 8 * 24)($4)
  ldc1 $f26, (4 * 36 + 8 * 26)($4)
  ldc1 $f28, (4 * 36 + 8 * 28)($4)
  ldc1 $f30, (4 * 36 + 8 * 30)($4)
#else
  // 64-bit FPRs: all 32 registers hold distinct values, so load every one
  ldc1 $f0, (4 * 36 + 8 * 0)($4)
  ldc1 $f1, (4 * 36 + 8 * 1)($4)
  ldc1 $f2, (4 * 36 + 8 * 2)($4)
  ldc1 $f3, (4 * 36 + 8 * 3)($4)
  ldc1 $f4, (4 * 36 + 8 * 4)($4)
  ldc1 $f5, (4 * 36 + 8 * 5)($4)
  ldc1 $f6, (4 * 36 + 8 * 6)($4)
  ldc1 $f7, (4 * 36 + 8 * 7)($4)
  ldc1 $f8, (4 * 36 + 8 * 8)($4)
  ldc1 $f9, (4 * 36 + 8 * 9)($4)
  ldc1 $f10, (4 * 36 + 8 * 10)($4)
  ldc1 $f11, (4 * 36 + 8 * 11)($4)
  ldc1 $f12, (4 * 36 + 8 * 12)($4)
  ldc1 $f13, (4 * 36 + 8 * 13)($4)
  ldc1 $f14, (4 * 36 + 8 * 14)($4)
  ldc1 $f15, (4 * 36 + 8 * 15)($4)
  ldc1 $f16, (4 * 36 + 8 * 16)($4)
  ldc1 $f17, (4 * 36 + 8 * 17)($4)
  ldc1 $f18, (4 * 36 + 8 * 18)($4)
  ldc1 $f19, (4 * 36 + 8 * 19)($4)
  ldc1 $f20, (4 * 36 + 8 * 20)($4)
  ldc1 $f21, (4 * 36 + 8 * 21)($4)
  ldc1 $f22, (4 * 36 + 8 * 22)($4)
  ldc1 $f23, (4 * 36 + 8 * 23)($4)
  ldc1 $f24, (4 * 36 + 8 * 24)($4)
  ldc1 $f25, (4 * 36 + 8 * 25)($4)
  ldc1 $f26, (4 * 36 + 8 * 26)($4)
  ldc1 $f27, (4 * 36 + 8 * 27)($4)
  ldc1 $f28, (4 * 36 + 8 * 28)($4)
  ldc1 $f29, (4 * 36 + 8 * 29)($4)
  ldc1 $f30, (4 * 36 + 8 * 30)($4)
  ldc1 $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif // __mips_hard_float
  // load new pc into ra
  // jump to ra, load a0 in the delay slot
#elif defined(__mips64)

// void libunwind::Registers_mips_newabi::jumpto()
// thread state pointer is in a0 ($4)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
#ifdef __mips_hard_float
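  // Every newabi slot is 8 bytes wide; the FP registers occupy slots 35-66,
  // after the 32 GPRs and a few special-register slots, so f<n> is loaded
  // from offset 8 * (35 + n).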
  ldc1 $f0, (8 * 35)($4)
  ldc1 $f1, (8 * 36)($4)
  ldc1 $f2, (8 * 37)($4)
  ldc1 $f3, (8 * 38)($4)
  ldc1 $f4, (8 * 39)($4)
  ldc1 $f5, (8 * 40)($4)
  ldc1 $f6, (8 * 41)($4)
  ldc1 $f7, (8 * 42)($4)
  ldc1 $f8, (8 * 43)($4)
  ldc1 $f9, (8 * 44)($4)
  ldc1 $f10, (8 * 45)($4)
  ldc1 $f11, (8 * 46)($4)
  ldc1 $f12, (8 * 47)($4)
  ldc1 $f13, (8 * 48)($4)
  ldc1 $f14, (8 * 49)($4)
  ldc1 $f15, (8 * 50)($4)
  ldc1 $f16, (8 * 51)($4)
  ldc1 $f17, (8 * 52)($4)
  ldc1 $f18, (8 * 53)($4)
  ldc1 $f19, (8 * 54)($4)
  ldc1 $f20, (8 * 55)($4)
  ldc1 $f21, (8 * 56)($4)
  ldc1 $f22, (8 * 57)($4)
  ldc1 $f23, (8 * 58)($4)
  ldc1 $f24, (8 * 59)($4)
  ldc1 $f25, (8 * 60)($4)
  ldc1 $f26, (8 * 61)($4)
  ldc1 $f27, (8 * 62)($4)
  ldc1 $f28, (8 * 63)($4)
  ldc1 $f29, (8 * 64)($4)
  ldc1 $f30, (8 * 65)($4)
  ldc1 $f31, (8 * 66)($4)
  // restore hi and lo
  ld $10, (8 * 10)($4)
  ld $11, (8 * 11)($4)
  ld $12, (8 * 12)($4)
  ld $13, (8 * 13)($4)
  ld $14, (8 * 14)($4)
  ld $15, (8 * 15)($4)
  ld $16, (8 * 16)($4)
  ld $17, (8 * 17)($4)
  ld $18, (8 * 18)($4)
  ld $19, (8 * 19)($4)
  ld $20, (8 * 20)($4)
  ld $21, (8 * 21)($4)
  ld $22, (8 * 22)($4)
  ld $23, (8 * 23)($4)
  ld $24, (8 * 24)($4)
  ld $25, (8 * 25)($4)
  ld $26, (8 * 26)($4)
  ld $27, (8 * 27)($4)
  ld $28, (8 * 28)($4)
  ld $29, (8 * 29)($4)
  ld $30, (8 * 30)($4)
  // load new pc into ra
  ld $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
#elif defined(__sparc__)

// void libunwind::Registers_sparc::jumpto()
// thread_state pointer is in o0

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
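  // Each ldd fills an even/odd register pair from a 64-bit doubleword, so
  // every load below restores two of the 32-bit window registers.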
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE