arm: provide a PCS-compliant setjmp implementation

The previous setjmp implementation (a static inline function containing
an 'asm volatile' sequence) was extremely fragile: (some versions of)
GCC optimised registers out of the clobber list.  One critical example
was the removal of 'r9' from the clobber list when -ffixed-r9 was
supplied.

To increase robustness and ensure PCS-compliant behaviour, the setjmp
and longjmp implementations are now written in assembly and closely
match what one would expect to find in a libc implementation.

Signed-off-by: Philipp Tomsich <philipp.tomsich@theobroma-systems.com>
Tested-by: Andy Yan <andy.yan@rock-chips.com>
parent ff143d5556
commit b4806d6f1f
 arch/arm/include/asm/setjmp.h | 94
 arch/arm/lib/Makefile         |  6
 arch/arm/lib/setjmp.S         | 37
 arch/arm/lib/setjmp_aarch64.S | 42
 4 files changed

--- a/arch/arm/include/asm/setjmp.h
+++ b/arch/arm/include/asm/setjmp.h
@@ -1,6 +1,6 @@
 /*
- * (C) Copyright 2016
- * Alexander Graf <agraf@suse.de>
+ * (C) Copyright 2017 Theobroma Systems Design und Consulting GmbH
+ * (C) Copyright 2016 Alexander Graf <agraf@suse.de>
  *
  * SPDX-License-Identifier: GPL-2.0+
  */
@@ -8,89 +8,21 @@
 #ifndef _SETJMP_H_
 #define _SETJMP_H_ 1
 
+/*
+ * This really should be opaque, but the EFI implementation wrongly
+ * assumes that a 'struct jmp_buf_data' is defined.
+ */
 struct jmp_buf_data {
-	ulong target;
-	ulong regs[5];
-	int ret;
-};
-
-typedef struct jmp_buf_data jmp_buf[1];
-
-static inline int setjmp(jmp_buf jmp)
-{
-	jmp->ret = 0;
-
-#ifdef CONFIG_ARM64
-	asm volatile(
-		"adr x1, jmp_target\n"
-		"str x1, %0\n"
-		"stp x26, x27, %1\n"
-		"stp x28, x29, %2\n"
-		"mov x1, sp\n"
-		"str x1, %3\n"
-		"jmp_target: "
-		: "=m" (jmp->target), "=m" (jmp->regs[0]),
-		  "=m" (jmp->regs[2]), "=m" (jmp->regs[4])
-		:
-		: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
-		  "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
-		  "x16", "x17", "x18", "x19", "x20", "x21", "x22",
-		  "x23", "x24", "x25", /* x26, x27, x28, x29, sp */
-		  "x30", "cc", "memory");
-#else
-	asm volatile(
-#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
-		".align 2\n"
-		"adr r0, jmp_target\n"
-		"add r0, r0, $1\n"
-#else
-		"adr r0, jmp_target\n"
-#endif
-		"mov r1, %0\n"
-		"mov r2, sp\n"
-		"stm r1!, {r0, r2, r4, r5, r6, r7}\n"
-		".align 2\n"
-		"jmp_target: \n"
-		:
-		: "l" (&jmp->target)
-		: "r0", "r1", "r2", "r3", /* "r4", "r5", "r6", "r7", */
-		  "r8", "r9", "r10", "r11", /* sp, */ "ip", "lr",
-		  "cc", "memory");
-#endif
-
-	return jmp->ret;
-}
-
-static inline __noreturn void longjmp(jmp_buf jmp, int ret)
-{
-	jmp->ret = ret;
-
-#ifdef CONFIG_ARM64
-	asm volatile(
-		"ldr x0, %0\n"
-		"ldr x1, %3\n"
-		"mov sp, x1\n"
-		"ldp x26, x27, %1\n"
-		"ldp x28, x25, %2\n"
-		"mov x29, x25\n"
-		"br x0\n"
-		:
-		: "m" (jmp->target), "m" (jmp->regs[0]), "m" (jmp->regs[2]),
-		  "m" (jmp->regs[4])
-		: "x0", "x1", "x25", "x26", "x27", "x28");
-#else
-	asm volatile(
-		"mov r1, %0\n"
-		"ldm r1!, {r0, r2, r4, r5, r6, r7}\n"
-		"mov sp, r2\n"
-		"bx r0\n"
-		:
-		: "l" (&jmp->target)
-		: "r1");
-#endif
-
-	while (1) { }
-}
+#if defined(__aarch64__)
+	u64 regs[13];
+#else
+	u32 regs[10];	/* r4-r9, sl, fp, sp, lr */
+#endif
+};
+
+typedef struct jmp_buf_data jmp_buf[1];
+
+int setjmp(jmp_buf jmp);
+void longjmp(jmp_buf jmp, int ret);
 
 #endif /* _SETJMP_H_ */
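
For orientation, a minimal caller of the new interface could look like the
sketch below. This snippet is not part of the patch: the function names and
the error code are invented for illustration, and it assumes the usual U-Boot
integer types (u32/u64) are already in scope before the header is included.

#include <asm/setjmp.h>

static jmp_buf resume;

static void fail(int code)
{
	/* Unwind back to the setjmp() call site in run_with_recovery(). */
	longjmp(resume, code);
}

static int run_with_recovery(void)
{
	int ret = setjmp(resume);

	if (ret)
		return ret;	/* re-entered via longjmp() */

	fail(2);		/* direct path: trigger the unwind */
	return 0;		/* not reached */
}

The declarations behave like the classic libc pair: the direct return of
setjmp() yields 0, and re-entry via longjmp() yields the non-zero value
passed to it.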

--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -17,6 +17,12 @@ else
 obj-y	+= vectors.o crt0.o
 endif
 
+ifdef CONFIG_ARM64
+obj-y	+= setjmp_aarch64.o
+else
+obj-y	+= setjmp.o
+endif
+
 ifndef CONFIG_SPL_BUILD
 ifdef CONFIG_ARM64
 obj-y	+= relocate_64.o

--- /dev/null
+++ b/arch/arm/lib/setjmp.S
@@ -0,0 +1,37 @@
+/*
+ * (C) 2017 Theobroma Systems Design und Consulting GmbH
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <config.h>
+#include <asm/assembler.h>
+#include <linux/linkage.h>
+
+.pushsection .text.setjmp, "ax"
+ENTRY(setjmp)
+	/*
+	 * A subroutine must preserve the contents of the registers
+	 * r4-r8, r10, r11 (v1-v5, v7 and v8) and SP (and r9 in PCS
+	 * variants that designate r9 as v6).
+	 */
+	mov  ip, sp
+	stm  a1, {v1-v8, ip, lr}
+	mov  a1, #0
+	bx   lr
+ENDPROC(setjmp)
+.popsection
+
+.pushsection .text.longjmp, "ax"
+ENTRY(longjmp)
+	ldm  a1, {v1-v8, ip, lr}
+	mov  sp, ip
+	mov  a1, a2
+	/* If we were passed a return value of zero, return one instead */
+	cmp  a1, #0
+	bne  1f
+	mov  a1, #1
+1:
+	bx   lr
+ENDPROC(longjmp)
+.popsection
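
For reference, the ascending register order used by the stm/ldm pair above
fills the header's u32 regs[10] as sketched below; the enumerators are purely
illustrative and do not exist in the tree.

/* Inferred AArch32 jmp_buf slot assignment (illustrative only). */
enum arm_jmp_buf_slot {
	JB_R4, JB_R5, JB_R6, JB_R7,	/* v1-v4 */
	JB_R8, JB_R9,			/* v5, v6 */
	JB_SL,				/* r10 (v7) */
	JB_FP,				/* r11 (v8) */
	JB_SP,				/* stack pointer, staged through ip */
	JB_LR,				/* return address */
};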

--- /dev/null
+++ b/arch/arm/lib/setjmp_aarch64.S
@@ -0,0 +1,42 @@
+/*
+ * (C) 2017 Theobroma Systems Design und Consulting GmbH
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <config.h>
+#include <asm/macro.h>
+#include <linux/linkage.h>
+
+.pushsection .text.setjmp, "ax"
+ENTRY(setjmp)
+	/* Preserve all callee-saved registers and the SP */
+	stp  x19, x20, [x0,#0]
+	stp  x21, x22, [x0,#16]
+	stp  x23, x24, [x0,#32]
+	stp  x25, x26, [x0,#48]
+	stp  x27, x28, [x0,#64]
+	stp  x29, x30, [x0,#80]
+	mov  x2, sp
+	str  x2, [x0, #96]
+	mov  x0, #0
+	ret
+ENDPROC(setjmp)
+.popsection
+
+.pushsection .text.longjmp, "ax"
+ENTRY(longjmp)
+	ldp  x19, x20, [x0,#0]
+	ldp  x21, x22, [x0,#16]
+	ldp  x23, x24, [x0,#32]
+	ldp  x25, x26, [x0,#48]
+	ldp  x27, x28, [x0,#64]
+	ldp  x29, x30, [x0,#80]
+	ldr  x2, [x0,#96]
+	mov  sp, x2
+	/* Move the return value in place, but return 1 if passed 0. */
+	adds x0, xzr, x1
+	csinc x0, x0, xzr, ne
+	ret
+ENDPROC(longjmp)
+.popsection
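
Both the adds/csinc pair here and the cmp/bne sequence in the AArch32 variant
implement the same rule for the value seen at the setjmp() call site;
expressed in C it amounts to the sketch below (the helper name is invented
for illustration).

/* Illustrative C equivalent of the longjmp() return-value fixup. */
static inline int longjmp_retval(int ret)
{
	return ret ? ret : 1;	/* setjmp() must never observe 0 twice */
}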