1 //===------------------------ UnwindRegistersSave.S -----------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
13 #if !defined(__USING_SJLJ_EXCEPTIONS__)
18 # extern int __unw_getcontext(unw_context_t* thread_state)
22 # +-----------------------+
23 # + thread_state pointer +
24 # +-----------------------+
26 # +-----------------------+ <-- SP
29 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # i386: snapshot the caller's registers into the unw_context_t whose
  # pointer was passed on the stack (held in %eax here).
  # NOTE(review): intermediate instructions are missing from this excerpt;
  # only a few representative stores are visible — confirm against the
  # complete file before editing.
40 movl %edx, 28(%eax) # store what sp was at call site as esp
44 movl %edx, 40(%eax) # store return address as eip
51 movl %edx, (%eax) # store original eax (saved earlier; restored last)
53 xorl %eax, %eax # return UNW_ESUCCESS
56 #elif defined(__x86_64__)
59 # extern int __unw_getcontext(unw_context_t* thread_state)
62 # thread_state pointer is in rdi
64 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # x86_64: snapshot the caller's registers into *thread_state.
  # PTR/TMP are register aliases presumably #defined earlier in the file
  # (not visible in this excerpt) — TODO confirm their definitions.
91 movq TMP,128(PTR) # store return address as rip
  # Save xmm2..xmm15 at 16-byte strides; movdqu is used because the
  # context buffer's alignment is not guaranteed.
  # NOTE(review): stores for xmm0/xmm1 and the GPRs are missing from this
  # excerpt — verify against the complete file.
100 movdqu %xmm2,208(PTR)
101 movdqu %xmm3,224(PTR)
102 movdqu %xmm4,240(PTR)
103 movdqu %xmm5,256(PTR)
104 movdqu %xmm6,272(PTR)
105 movdqu %xmm7,288(PTR)
106 movdqu %xmm8,304(PTR)
107 movdqu %xmm9,320(PTR)
108 movdqu %xmm10,336(PTR)
109 movdqu %xmm11,352(PTR)
110 movdqu %xmm12,368(PTR)
111 movdqu %xmm13,384(PTR)
112 movdqu %xmm14,400(PTR)
113 movdqu %xmm15,416(PTR)
115 xorl %eax, %eax # return UNW_ESUCCESS
118 #elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
121 # extern int __unw_getcontext(unw_context_t* thread_state)
124 # thread_state pointer is in a0 ($4)
126 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # MIPS O32: GPR area occupies the first 4*36 bytes of the context;
  # FP registers follow at (4*36 + 8*n) offsets.
162 # Store return address to pc
169 #ifdef __mips_hard_float
  # Even-numbered doubles only: on 32-bit FPRs, $f(2n)/$f(2n+1) pair up
  # to hold one double, so only even registers are stored.
171 sdc1 $f0, (4 * 36 + 8 * 0)($4)
172 sdc1 $f2, (4 * 36 + 8 * 2)($4)
173 sdc1 $f4, (4 * 36 + 8 * 4)($4)
174 sdc1 $f6, (4 * 36 + 8 * 6)($4)
175 sdc1 $f8, (4 * 36 + 8 * 8)($4)
176 sdc1 $f10, (4 * 36 + 8 * 10)($4)
177 sdc1 $f12, (4 * 36 + 8 * 12)($4)
178 sdc1 $f14, (4 * 36 + 8 * 14)($4)
179 sdc1 $f16, (4 * 36 + 8 * 16)($4)
180 sdc1 $f18, (4 * 36 + 8 * 18)($4)
181 sdc1 $f20, (4 * 36 + 8 * 20)($4)
182 sdc1 $f22, (4 * 36 + 8 * 22)($4)
183 sdc1 $f24, (4 * 36 + 8 * 24)($4)
184 sdc1 $f26, (4 * 36 + 8 * 26)($4)
185 sdc1 $f28, (4 * 36 + 8 * 28)($4)
186 sdc1 $f30, (4 * 36 + 8 * 30)($4)
  # NOTE(review): in this excerpt both the even-register run above and the
  # all-register run below execute unconditionally, which would double-store
  # several registers. The full file presumably selects one run via an
  # FPR-width preprocessor conditional (e.g. __mips_fpr) whose #if/#else
  # lines are missing here — confirm against the complete source.
188 sdc1 $f0, (4 * 36 + 8 * 0)($4)
189 sdc1 $f1, (4 * 36 + 8 * 1)($4)
190 sdc1 $f2, (4 * 36 + 8 * 2)($4)
191 sdc1 $f3, (4 * 36 + 8 * 3)($4)
192 sdc1 $f4, (4 * 36 + 8 * 4)($4)
193 sdc1 $f5, (4 * 36 + 8 * 5)($4)
194 sdc1 $f6, (4 * 36 + 8 * 6)($4)
195 sdc1 $f7, (4 * 36 + 8 * 7)($4)
196 sdc1 $f8, (4 * 36 + 8 * 8)($4)
197 sdc1 $f9, (4 * 36 + 8 * 9)($4)
198 sdc1 $f10, (4 * 36 + 8 * 10)($4)
199 sdc1 $f11, (4 * 36 + 8 * 11)($4)
200 sdc1 $f12, (4 * 36 + 8 * 12)($4)
201 sdc1 $f13, (4 * 36 + 8 * 13)($4)
202 sdc1 $f14, (4 * 36 + 8 * 14)($4)
203 sdc1 $f15, (4 * 36 + 8 * 15)($4)
204 sdc1 $f16, (4 * 36 + 8 * 16)($4)
205 sdc1 $f17, (4 * 36 + 8 * 17)($4)
206 sdc1 $f18, (4 * 36 + 8 * 18)($4)
207 sdc1 $f19, (4 * 36 + 8 * 19)($4)
208 sdc1 $f20, (4 * 36 + 8 * 20)($4)
209 sdc1 $f21, (4 * 36 + 8 * 21)($4)
210 sdc1 $f22, (4 * 36 + 8 * 22)($4)
211 sdc1 $f23, (4 * 36 + 8 * 23)($4)
212 sdc1 $f24, (4 * 36 + 8 * 24)($4)
213 sdc1 $f25, (4 * 36 + 8 * 25)($4)
214 sdc1 $f26, (4 * 36 + 8 * 26)($4)
215 sdc1 $f27, (4 * 36 + 8 * 27)($4)
216 sdc1 $f28, (4 * 36 + 8 * 28)($4)
217 sdc1 $f29, (4 * 36 + 8 * 29)($4)
218 sdc1 $f30, (4 * 36 + 8 * 30)($4)
219 sdc1 $f31, (4 * 36 + 8 * 31)($4)
223 # return UNW_ESUCCESS
227 #elif defined(__mips64)
230 # extern int __unw_getcontext(unw_context_t* thread_state)
233 # thread_state pointer is in a0 ($4)
235 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # MIPS64: context is laid out as 8-byte slots; GPR/pc area occupies
  # slots 0..34, FP registers $f0..$f31 occupy slots 35..66.
271 # Store return address to pc
278 #ifdef __mips_hard_float
279 sdc1 $f0, (8 * 35)($4)
280 sdc1 $f1, (8 * 36)($4)
281 sdc1 $f2, (8 * 37)($4)
282 sdc1 $f3, (8 * 38)($4)
283 sdc1 $f4, (8 * 39)($4)
284 sdc1 $f5, (8 * 40)($4)
285 sdc1 $f6, (8 * 41)($4)
286 sdc1 $f7, (8 * 42)($4)
287 sdc1 $f8, (8 * 43)($4)
288 sdc1 $f9, (8 * 44)($4)
289 sdc1 $f10, (8 * 45)($4)
290 sdc1 $f11, (8 * 46)($4)
291 sdc1 $f12, (8 * 47)($4)
292 sdc1 $f13, (8 * 48)($4)
293 sdc1 $f14, (8 * 49)($4)
294 sdc1 $f15, (8 * 50)($4)
295 sdc1 $f16, (8 * 51)($4)
296 sdc1 $f17, (8 * 52)($4)
297 sdc1 $f18, (8 * 53)($4)
298 sdc1 $f19, (8 * 54)($4)
299 sdc1 $f20, (8 * 55)($4)
300 sdc1 $f21, (8 * 56)($4)
301 sdc1 $f22, (8 * 57)($4)
302 sdc1 $f23, (8 * 58)($4)
303 sdc1 $f24, (8 * 59)($4)
304 sdc1 $f25, (8 * 60)($4)
305 sdc1 $f26, (8 * 61)($4)
306 sdc1 $f27, (8 * 62)($4)
307 sdc1 $f28, (8 * 63)($4)
308 sdc1 $f29, (8 * 64)($4)
309 sdc1 $f30, (8 * 65)($4)
310 sdc1 $f31, (8 * 66)($4)
313 # return UNW_ESUCCESS
317 # elif defined(__mips__)
320 # extern int __unw_getcontext(unw_context_t* thread_state)
322 # Just trap for the time being.
  # Fallback for MIPS ABIs not handled above: deliberately unimplemented.
323 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
326 #elif defined(__powerpc64__)
329 // extern int __unw_getcontext(unw_context_t* thread_state)
332 // thread_state pointer is in r3
334 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
336 // store register (GPR): GPR n lives at 8-byte slot (n + 2) in the context
337 #define PPC64_STR(n) \
338 std %r##n, (8 * (n + 2))(%r3)
343 std %r0, PPC64_OFFS_SRR0(%r3) // store lr as srr0
// Special-purpose registers are each staged through r0 (mf* moves are
// missing from this excerpt) and stored at named offsets:
377 std %r0, PPC64_OFFS_CR(%r3)
379 std %r0, PPC64_OFFS_XER(%r3)
381 std %r0, PPC64_OFFS_LR(%r3)
383 std %r0, PPC64_OFFS_CTR(%r3)
385 std %r0, PPC64_OFFS_VRSAVE(%r3)
389 // (note that this also saves floating point registers and V registers,
390 // because part of VS is mapped to these registers)
392 addi %r4, %r3, PPC64_OFFS_FP // r4 = cursor into the FP/VS save area
// Store one VSX register through r4; the ;\ continuation suggests the
// full macro also advances r4 — remainder not visible in this excerpt.
395 #define PPC64_STVS(n) \
396 stxvd2x %vs##n, 0, %r4 ;\
467 #define PPC64_STF(n) \
468 stfd %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
470 // save float registers
504 // save vector registers
506 // Use 16-bytes below the stack pointer as an
507 // aligned buffer to save each vector register.
508 // Note that the stack pointer is always 16-byte aligned.
// stvx requires a 16-byte-aligned address, so each v-register is bounced
// through the aligned stack buffer (r4) and copied out as two 8-byte stores.
511 #define PPC64_STV_UNALIGNED(n) \
512 stvx %v##n, 0, %r4 ;\
514 std %r5, (PPC64_OFFS_V + n * 16)(%r3) ;\
516 std %r5, (PPC64_OFFS_V + n * 16 + 8)(%r3)
518 PPC64_STV_UNALIGNED(0)
519 PPC64_STV_UNALIGNED(1)
520 PPC64_STV_UNALIGNED(2)
521 PPC64_STV_UNALIGNED(3)
522 PPC64_STV_UNALIGNED(4)
523 PPC64_STV_UNALIGNED(5)
524 PPC64_STV_UNALIGNED(6)
525 PPC64_STV_UNALIGNED(7)
526 PPC64_STV_UNALIGNED(8)
527 PPC64_STV_UNALIGNED(9)
528 PPC64_STV_UNALIGNED(10)
529 PPC64_STV_UNALIGNED(11)
530 PPC64_STV_UNALIGNED(12)
531 PPC64_STV_UNALIGNED(13)
532 PPC64_STV_UNALIGNED(14)
533 PPC64_STV_UNALIGNED(15)
534 PPC64_STV_UNALIGNED(16)
535 PPC64_STV_UNALIGNED(17)
536 PPC64_STV_UNALIGNED(18)
537 PPC64_STV_UNALIGNED(19)
538 PPC64_STV_UNALIGNED(20)
539 PPC64_STV_UNALIGNED(21)
540 PPC64_STV_UNALIGNED(22)
541 PPC64_STV_UNALIGNED(23)
542 PPC64_STV_UNALIGNED(24)
543 PPC64_STV_UNALIGNED(25)
544 PPC64_STV_UNALIGNED(26)
545 PPC64_STV_UNALIGNED(27)
546 PPC64_STV_UNALIGNED(28)
547 PPC64_STV_UNALIGNED(29)
548 PPC64_STV_UNALIGNED(30)
549 PPC64_STV_UNALIGNED(31)
553 li %r3, 0 // return UNW_ESUCCESS
557 #elif defined(__ppc__)
560 // extern int __unw_getcontext(unw_context_t* thread_state)
563 // thread_state pointer is in r3
565 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
568 stw %r0, 0(%r3) // store lr as srr0
601 // save VRSave register
611 // save float registers
646 // save vector registers
649 rlwinm %r4, %r4, 0, 0, 27 // mask low 4-bits
650 // r4 is now a 16-byte aligned pointer into the red zone
// stvx needs a 16-byte-aligned address, so each vector register is
// staged in the aligned red-zone buffer (r4) and copied to the context
// as four 4-byte word stores.
652 #define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
653 stvx _vec, 0, %r4 SEPARATOR \
654 lwz %r5, 0(%r4) SEPARATOR \
655 stw %r5, _offset(%r3) SEPARATOR \
656 lwz %r5, 4(%r4) SEPARATOR \
657 stw %r5, _offset+4(%r3) SEPARATOR \
658 lwz %r5, 8(%r4) SEPARATOR \
659 stw %r5, _offset+8(%r3) SEPARATOR \
660 lwz %r5, 12(%r4) SEPARATOR \
661 stw %r5, _offset+12(%r3)
// Vector save area starts at byte offset 424 in the context; one
// 16-byte (0x10) slot per register.
663 SAVE_VECTOR_UNALIGNED( %v0, 424+0x000)
664 SAVE_VECTOR_UNALIGNED( %v1, 424+0x010)
665 SAVE_VECTOR_UNALIGNED( %v2, 424+0x020)
666 SAVE_VECTOR_UNALIGNED( %v3, 424+0x030)
667 SAVE_VECTOR_UNALIGNED( %v4, 424+0x040)
668 SAVE_VECTOR_UNALIGNED( %v5, 424+0x050)
669 SAVE_VECTOR_UNALIGNED( %v6, 424+0x060)
670 SAVE_VECTOR_UNALIGNED( %v7, 424+0x070)
671 SAVE_VECTOR_UNALIGNED( %v8, 424+0x080)
672 SAVE_VECTOR_UNALIGNED( %v9, 424+0x090)
673 SAVE_VECTOR_UNALIGNED(%v10, 424+0x0A0)
674 SAVE_VECTOR_UNALIGNED(%v11, 424+0x0B0)
675 SAVE_VECTOR_UNALIGNED(%v12, 424+0x0C0)
676 SAVE_VECTOR_UNALIGNED(%v13, 424+0x0D0)
677 SAVE_VECTOR_UNALIGNED(%v14, 424+0x0E0)
678 SAVE_VECTOR_UNALIGNED(%v15, 424+0x0F0)
679 SAVE_VECTOR_UNALIGNED(%v16, 424+0x100)
680 SAVE_VECTOR_UNALIGNED(%v17, 424+0x110)
681 SAVE_VECTOR_UNALIGNED(%v18, 424+0x120)
682 SAVE_VECTOR_UNALIGNED(%v19, 424+0x130)
683 SAVE_VECTOR_UNALIGNED(%v20, 424+0x140)
684 SAVE_VECTOR_UNALIGNED(%v21, 424+0x150)
685 SAVE_VECTOR_UNALIGNED(%v22, 424+0x160)
686 SAVE_VECTOR_UNALIGNED(%v23, 424+0x170)
687 SAVE_VECTOR_UNALIGNED(%v24, 424+0x180)
688 SAVE_VECTOR_UNALIGNED(%v25, 424+0x190)
689 SAVE_VECTOR_UNALIGNED(%v26, 424+0x1A0)
690 SAVE_VECTOR_UNALIGNED(%v27, 424+0x1B0)
691 SAVE_VECTOR_UNALIGNED(%v28, 424+0x1C0)
692 SAVE_VECTOR_UNALIGNED(%v29, 424+0x1D0)
693 SAVE_VECTOR_UNALIGNED(%v30, 424+0x1E0)
694 SAVE_VECTOR_UNALIGNED(%v31, 424+0x1F0)
696 li %r3, 0 // return UNW_ESUCCESS
700 #elif defined(__arm64__) || defined(__aarch64__)
703 // extern int __unw_getcontext(unw_context_t* thread_state)
706 // thread_state pointer is in x0
709 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
// GPRs x0..x30 go at 8-byte slots starting at offset 0; stp stores
// register pairs at 16-byte strides.
710 stp x0, x1, [x0, #0x000]
711 stp x2, x3, [x0, #0x010]
712 stp x4, x5, [x0, #0x020]
713 stp x6, x7, [x0, #0x030]
714 stp x8, x9, [x0, #0x040]
715 stp x10,x11, [x0, #0x050]
716 stp x12,x13, [x0, #0x060]
717 stp x14,x15, [x0, #0x070]
718 stp x16,x17, [x0, #0x080]
719 stp x18,x19, [x0, #0x090]
720 stp x20,x21, [x0, #0x0A0]
721 stp x22,x23, [x0, #0x0B0]
722 stp x24,x25, [x0, #0x0C0]
723 stp x26,x27, [x0, #0x0D0]
724 stp x28,x29, [x0, #0x0E0]
725 str x30, [x0, #0x0F0] // lr slot
// (the sp store between these two x30 stores is missing from this excerpt)
728 str x30, [x0, #0x100] // store return address as pc
// Only the low 64 bits (dN) of the vector registers are saved; per
// AAPCS64 only d8-d15's low halves are callee-saved anyway.
730 stp d0, d1, [x0, #0x110]
731 stp d2, d3, [x0, #0x120]
732 stp d4, d5, [x0, #0x130]
733 stp d6, d7, [x0, #0x140]
734 stp d8, d9, [x0, #0x150]
735 stp d10,d11, [x0, #0x160]
736 stp d12,d13, [x0, #0x170]
737 stp d14,d15, [x0, #0x180]
738 stp d16,d17, [x0, #0x190]
739 stp d18,d19, [x0, #0x1A0]
740 stp d20,d21, [x0, #0x1B0]
741 stp d22,d23, [x0, #0x1C0]
742 stp d24,d25, [x0, #0x1D0]
743 stp d26,d27, [x0, #0x1E0]
744 stp d28,d29, [x0, #0x1F0]
745 str d30, [x0, #0x200]
746 str d31, [x0, #0x208]
747 mov x0, #0 // return UNW_ESUCCESS
750 #elif defined(__arm__) && !defined(__APPLE__)
752 #if !defined(__ARM_ARCH_ISA_ARM)
753 #if (__ARM_ARCH_ISA_THUMB == 2)
760 @ extern int __unw_getcontext(unw_context_t* thread_state)
763 @ thread_state pointer is in r0
765 @ Per EHABI #4.7 this only saves the core integer registers.
766 @ EHABI #7.4.5 notes that in general all VRS registers should be restored
767 @ however this is very hard to do for VFP registers because it is unknown
768 @ to the library how many registers are implemented by the architecture.
769 @ Instead, VFP registers are demand saved by logic external to __unw_getcontext.
772 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
773 #if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
@ Thumb-1 path: registers are staged through low registers (moves missing
@ from this excerpt) and stored individually.
782 str r1, [r0, #0] @ r11
783 @ r12 does not need storing, it is the intra-procedure-call scratch register
784 str r2, [r0, #8] @ sp
785 str r3, [r0, #12] @ lr
786 str r3, [r0, #16] @ store return address as pc
787 @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
788 @ It is safe to use here though because we are about to return, and cpsr is
789 @ not expected to be preserved.
790 movs r0, #0 @ return UNW_ESUCCESS
792 @ 32bit thumb-2 restrictions for stm:
793 @ . the sp (r13) cannot be in the list
794 @ . the pc (r15) cannot be in the list in an STM instruction
798 str lr, [r0, #60] @ store return address as pc
799 mov r0, #0 @ return UNW_ESUCCESS
804 @ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
807 @ values pointer is in r0
813 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
818 @ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
821 @ values pointer is in r0
827 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
828 vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
832 @ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
835 @ values pointer is in r0
841 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
842 @ VFP and iwMMX instructions are only available when compiling with the flags
843 @ that enable them. We do not want to do that in the library (because we do not
844 @ want the compiler to generate instructions that access those) but this is
845 @ only accessed if the personality routine needs these registers. Use of
846 @ these registers implies they are, actually, available on the target, so
847 @ it's ok to execute.
848 @ So, generate the instructions using the corresponding coprocessor mnemonic.
852 #if defined(_LIBUNWIND_ARM_WMMX)
855 @ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
858 @ values pointer is in r0
864 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
@ Coprocessor-store encodings of the wstrd instructions; each stores one
@ 64-bit wMMX data register and post-increments r0 by 8.
865 stcl p1, cr0, [r0], #8 @ wstrd wR0, [r0], #8
866 stcl p1, cr1, [r0], #8 @ wstrd wR1, [r0], #8
867 stcl p1, cr2, [r0], #8 @ wstrd wR2, [r0], #8
868 stcl p1, cr3, [r0], #8 @ wstrd wR3, [r0], #8
869 stcl p1, cr4, [r0], #8 @ wstrd wR4, [r0], #8
870 stcl p1, cr5, [r0], #8 @ wstrd wR5, [r0], #8
871 stcl p1, cr6, [r0], #8 @ wstrd wR6, [r0], #8
872 stcl p1, cr7, [r0], #8 @ wstrd wR7, [r0], #8
873 stcl p1, cr8, [r0], #8 @ wstrd wR8, [r0], #8
874 stcl p1, cr9, [r0], #8 @ wstrd wR9, [r0], #8
875 stcl p1, cr10, [r0], #8 @ wstrd wR10, [r0], #8
876 stcl p1, cr11, [r0], #8 @ wstrd wR11, [r0], #8
877 stcl p1, cr12, [r0], #8 @ wstrd wR12, [r0], #8
878 stcl p1, cr13, [r0], #8 @ wstrd wR13, [r0], #8
879 stcl p1, cr14, [r0], #8 @ wstrd wR14, [r0], #8
880 stcl p1, cr15, [r0], #8 @ wstrd wR15, [r0], #8
884 @ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
887 @ values pointer is in r0
893 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
@ Store the four wMMX general control registers (wCGR0-3), 4 bytes each.
894 stc2 p1, cr8, [r0], #4 @ wstrw wCGR0, [r0], #4
895 stc2 p1, cr9, [r0], #4 @ wstrw wCGR1, [r0], #4
896 stc2 p1, cr10, [r0], #4 @ wstrw wCGR2, [r0], #4
897 stc2 p1, cr11, [r0], #4 @ wstrw wCGR3, [r0], #4
902 #elif defined(__or1k__)
905 # extern int __unw_getcontext(unw_context_t* thread_state)
908 # thread_state pointer is in r3
  # OpenRISC 1000: body not visible in this excerpt.
910 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
948 #elif defined(__riscv)
951 # extern int __unw_getcontext(unw_context_t* thread_state)
954 # thread_state pointer is in a0
956 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Integer registers occupy the first 8*32 bytes of the context (stores
  # not visible in this excerpt); f0-f31 follow at 8-byte slots.
990 #ifdef __riscv_float_abi_double
991 fsd f0, (8 * 32 + 8 * 0)(a0)
992 fsd f1, (8 * 32 + 8 * 1)(a0)
993 fsd f2, (8 * 32 + 8 * 2)(a0)
994 fsd f3, (8 * 32 + 8 * 3)(a0)
995 fsd f4, (8 * 32 + 8 * 4)(a0)
996 fsd f5, (8 * 32 + 8 * 5)(a0)
997 fsd f6, (8 * 32 + 8 * 6)(a0)
998 fsd f7, (8 * 32 + 8 * 7)(a0)
999 fsd f8, (8 * 32 + 8 * 8)(a0)
1000 fsd f9, (8 * 32 + 8 * 9)(a0)
1001 fsd f10, (8 * 32 + 8 * 10)(a0)
1002 fsd f11, (8 * 32 + 8 * 11)(a0)
1003 fsd f12, (8 * 32 + 8 * 12)(a0)
1004 fsd f13, (8 * 32 + 8 * 13)(a0)
1005 fsd f14, (8 * 32 + 8 * 14)(a0)
1006 fsd f15, (8 * 32 + 8 * 15)(a0)
1007 fsd f16, (8 * 32 + 8 * 16)(a0)
1008 fsd f17, (8 * 32 + 8 * 17)(a0)
1009 fsd f18, (8 * 32 + 8 * 18)(a0)
1010 fsd f19, (8 * 32 + 8 * 19)(a0)
1011 fsd f20, (8 * 32 + 8 * 20)(a0)
1012 fsd f21, (8 * 32 + 8 * 21)(a0)
1013 fsd f22, (8 * 32 + 8 * 22)(a0)
1014 fsd f23, (8 * 32 + 8 * 23)(a0)
1015 fsd f24, (8 * 32 + 8 * 24)(a0)
1016 fsd f25, (8 * 32 + 8 * 25)(a0)
1017 fsd f26, (8 * 32 + 8 * 26)(a0)
1018 fsd f27, (8 * 32 + 8 * 27)(a0)
1019 fsd f28, (8 * 32 + 8 * 28)(a0)
1020 fsd f29, (8 * 32 + 8 * 29)(a0)
1021 fsd f30, (8 * 32 + 8 * 30)(a0)
1022 fsd f31, (8 * 32 + 8 * 31)(a0)
1025 li a0, 0 // return UNW_ESUCCESS
1028 #elif defined(__sparc__)
1031 # extern int __unw_getcontext(unw_context_t* thread_state)
1034 # thread_state pointer is in o0
1036 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # SPARC: std stores an even/odd register pair (8 bytes); only the tail
  # of the in-register saves is visible in this excerpt.
1052 std %i2, [%o0 + 104]
1053 std %i4, [%o0 + 112]
1054 std %i6, [%o0 + 120]
1056 clr %o0 // return UNW_ESUCCESS
1059 WEAK_ALIAS(__unw_getcontext, unw_getcontext)
1061 #endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
1063 NO_EXEC_STACK_DIRECTIVE