xtensa: add support for the xtensa processor architecture [2/2]

The Xtensa processor architecture is a configurable, extensible,
and synthesizable 32-bit RISC processor core provided by Tensilica, Inc.

This is the second part of the basic architecture port, adding the
'arch/xtensa' directory and a readme file.

Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Tom Rini <trini@konsulko.com>
commit c978b52410 (parent de5e5cea02)
Author: Chris Zankel; Committer: Tom Rini
 arch/Kconfig                            |   6
 arch/xtensa/Kconfig                     |  18
 arch/xtensa/Makefile                    |   8
 arch/xtensa/config.mk                   |  12
 arch/xtensa/cpu/Makefile                |   9
 arch/xtensa/cpu/cpu.c                   |  49
 arch/xtensa/cpu/exceptions.c            |  44
 arch/xtensa/cpu/start.S                 | 677
 arch/xtensa/cpu/u-boot.lds              | 116
 arch/xtensa/dts/Makefile                |  13
 arch/xtensa/dts/include/dt-bindings     |   1
 arch/xtensa/include/asm/addrspace.h     |  31
 arch/xtensa/include/asm/asmmacro.h      | 152
 arch/xtensa/include/asm/atomic.h        |  55
 arch/xtensa/include/asm/bitops.h        |  36
 arch/xtensa/include/asm/bootparam.h     |  54
 arch/xtensa/include/asm/byteorder.h     |  83
 arch/xtensa/include/asm/cache.h         |  25
 arch/xtensa/include/asm/cacheasm.h      | 211
 arch/xtensa/include/asm/config.h        |  24
 arch/xtensa/include/asm/errno.h         |   1
 arch/xtensa/include/asm/global_data.h   |  20
 arch/xtensa/include/asm/io.h            | 148
 arch/xtensa/include/asm/ldscript.h      | 222
 arch/xtensa/include/asm/linkage.h       |   4
 arch/xtensa/include/asm/misc.h          |  20
 arch/xtensa/include/asm/posix_types.h   |  74
 arch/xtensa/include/asm/processor.h     |  11
 arch/xtensa/include/asm/ptrace.h        | 133
 arch/xtensa/include/asm/regs.h          |  95
 arch/xtensa/include/asm/relocate.h      |  14
 arch/xtensa/include/asm/sections.h      |  12
 arch/xtensa/include/asm/string.h        |  10
 arch/xtensa/include/asm/system.h        |  27
 arch/xtensa/include/asm/types.h         |  60
 arch/xtensa/include/asm/u-boot.h        |  41
 arch/xtensa/include/asm/unaligned.h     |   6
 arch/xtensa/lib/Makefile                |  10
 arch/xtensa/lib/bootm.c                 | 197
 arch/xtensa/lib/cache.c                 |  60
 arch/xtensa/lib/misc.S                  | 179
 arch/xtensa/lib/relocate.c              |  18
 arch/xtensa/lib/time.c                  | 121
 43 files changed, 3107 insertions(+)

arch/Kconfig
@@ -88,6 +88,11 @@ config X86
select DM_SPI
select DM_SPI_FLASH
config XTENSA
bool "Xtensa architecture"
select CREATE_ARCH_SYMLINK
select SUPPORT_OF_CONTROL
endchoice
config SYS_ARCH
@@ -161,3 +166,4 @@ source "arch/sandbox/Kconfig"
source "arch/sh/Kconfig"
source "arch/sparc/Kconfig"
source "arch/x86/Kconfig"
source "arch/xtensa/Kconfig"

arch/xtensa/Kconfig
@@ -0,0 +1,18 @@
menu "Xtensa architecture"
depends on XTENSA
config SYS_ARCH
string
default "xtensa"
config SYS_CPU
string "Xtensa Core Variant"
choice
prompt "Target select"
endchoice
endmenu

arch/xtensa/Makefile
@@ -0,0 +1,8 @@
#
# SPDX-License-Identifier: GPL-2.0+
#
head-y := arch/xtensa/cpu/start.o
libs-y += arch/xtensa/cpu/
libs-y += arch/xtensa/lib/

arch/xtensa/config.mk
@@ -0,0 +1,12 @@
#
# (C) Copyright 2007 - 2013 Tensilica, Inc.
# (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
#
# SPDX-License-Identifier: GPL-2.0+
#
CROSS_COMPILE ?= xtensa-linux-
PLATFORM_CPPFLAGS += -D__XTENSA__ -mlongcalls -mforce-no-pic \
-ffunction-sections -fdata-sections
LDFLAGS_FINAL += --gc-sections

arch/xtensa/cpu/Makefile
@@ -0,0 +1,9 @@
#
# (C) Copyright 2007 - 2013 Tensilica, Inc.
# (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
#
# SPDX-License-Identifier: GPL-2.0+
#
obj-y = cpu.o exceptions.o
extra-y = start.o

arch/xtensa/cpu/cpu.c
@@ -0,0 +1,49 @@
/*
* (C) Copyright 2008 - 2013 Tensilica Inc.
* (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
/*
* CPU specific code
*/
#include <common.h>
#include <command.h>
#include <linux/stringify.h>
#include <asm/global_data.h>
#include <asm/cache.h>
#include <asm/string.h>
#include <asm/misc.h>
DECLARE_GLOBAL_DATA_PTR;
gd_t *gd __attribute__((section(".data")));
#if defined(CONFIG_DISPLAY_CPUINFO)
/*
* Print information about the CPU.
*/
int print_cpuinfo(void)
{
char buf[120], mhz[8];
uint32_t id0, id1;
asm volatile ("rsr %0, 176\n"
"rsr %1, 208\n"
: "=r"(id0), "=r"(id1));
sprintf(buf, "CPU: Xtensa %s (id: %08x:%08x) at %s MHz\n",
XCHAL_CORE_ID, id0, id1, strmhz(mhz, gd->cpu_clk));
puts(buf);
return 0;
}
#endif
int arch_cpu_init(void)
{
gd->ram_size = CONFIG_SYS_SDRAM_SIZE;
return 0;
}

arch/xtensa/cpu/exceptions.c
@@ -0,0 +1,44 @@
/*
* (C) Copyright 2008 - 2013 Tensilica Inc.
* (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
/*
* Exception handling.
* We currently don't handle any exceptions and force a reset.
* (Note that alloca is a special case and is handled in start.S.)
*/
#include <common.h>
#include <command.h>
#include <asm/string.h>
#include <asm/regs.h>
typedef void (*handler_t)(struct pt_regs *);
void unhandled_exception(struct pt_regs *regs)
{
printf("Unhandled Exception: EXCCAUSE = %ld, EXCVADDR = %lx, pc = %lx\n",
regs->exccause, regs->excvaddr, regs->pc);
panic("*** PANIC\n");
}
handler_t exc_table[EXCCAUSE_LAST] = {
[0 ... EXCCAUSE_LAST-1] = unhandled_exception,
};
int interrupt_init(void)
{
return 0;
}
void enable_interrupts(void)
{
}
int disable_interrupts(void)
{
return 0;
}
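
For illustration only (not part of this patch): since exc_table holds one handler per EXCCAUSE value, board or SoC code could install its own handler for a specific cause before exceptions are expected. A minimal sketch, with my_ls_error_handler and board_install_exception_handlers as assumed names:

#include <common.h>
#include <asm/ptrace.h>
#include <asm/regs.h>

/* same element type as the exc_table defined in exceptions.c above */
extern void (*exc_table[EXCCAUSE_LAST])(struct pt_regs *);

static void my_ls_error_handler(struct pt_regs *regs)
{
	printf("load/store error: pc = %lx, addr = %lx\n",
	       regs->pc, regs->excvaddr);
}

void board_install_exception_handlers(void)
{
	exc_table[EXCCAUSE_LOAD_STORE_ERROR] = my_ls_error_handler;
}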

arch/xtensa/cpu/start.S
@@ -0,0 +1,677 @@
/*
* (C) Copyright 2008 - 2013 Tensilica Inc.
* (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <config.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/regs.h>
#include <asm/arch/tie.h>
#include <asm-offsets.h>
/*
* Offsets into the pt_regs structure.
* Make sure these always match the structure defined in ptrace.h!
*/
#define PT_PC 0
#define PT_PS 4
#define PT_DEPC 8
#define PT_EXCCAUSE 12
#define PT_EXCVADDR 16
#define PT_DEBUGCAUSE 20
#define PT_WMASK 24
#define PT_LBEG 28
#define PT_LEND 32
#define PT_LCOUNT 36
#define PT_SAR 40
#define PT_WINDOWBASE 44
#define PT_WINDOWSTART 48
#define PT_SYSCALL 52
#define PT_ICOUNTLEVEL 56
#define PT_RESERVED 60
#define PT_AREG 64
#define PT_SIZE (64 + 64)
/*
* Cache attributes are different for full MMU and region protection.
*/
#if XCHAL_HAVE_PTP_MMU
#define CA_WRITEBACK (0x7)
#else
#define CA_WRITEBACK (0x4)
#endif
/*
* Reset vector.
* Only a trampoline to jump to _start
* (Note that we have to mark the section writable as the section contains
* a relocatable literal)
*/
.section .ResetVector.text, "awx"
.global _ResetVector
_ResetVector:
j 1f
.align 4
2: .long _start
1: l32r a2, 2b
jx a2
/*
* Processor initialization. We still run in ROM space.
*
* NOTE: Running in ROM
* For Xtensa, we currently don't run code from ROM but unpack the data
* immediately to memory. This requires, for example,
* that DDR has been set up before running U-Boot. (See also comments
* inline for ways to change it)
*/
.section .reset.text, "ax"
.global _start
.align 4
_start:
/* Keep a0 = 0 for various initializations */
movi a0, 0
/*
* For full MMU cores, put page table at unmapped virtual address.
* This ensures that accesses outside the static maps result
* in miss exceptions rather than random behaviour.
*/
#if XCHAL_HAVE_PTP_MMU
wsr a0, PTEVADDR
#endif
/* Disable dbreak debug exceptions */
#if XCHAL_HAVE_DEBUG && XCHAL_NUM_DBREAK > 0
.set _index, 0
.rept XCHAL_NUM_DBREAK
wsr a0, DBREAKC + _index
.set _index, _index + 1
.endr
#endif
/* Reset windowbase and windowstart */
#if XCHAL_HAVE_WINDOWED
movi a3, 1
wsr a3, windowstart
wsr a0, windowbase
rsync
movi a0, 0 /* windowbase might have changed */
#endif
/*
* VECBASE in the bitstream may differ from the header files;
* set or check it.
*/
#if XCHAL_HAVE_VECBASE
movi a3, XCHAL_VECBASE_RESET_VADDR /* VECBASE reset value */
wsr a3, VECBASE
#endif
#if XCHAL_HAVE_LOOPS
/* Disable loops */
wsr a0, LCOUNT
#endif
/* Set PS.WOE = 0, PS.EXCM = 0 (for loop), PS.INTLEVEL = EXCM level */
#if XCHAL_HAVE_XEA1
movi a2, 1
#else
movi a2, XCHAL_EXCM_LEVEL
#endif
wsr a2, PS
rsync
/* Unlock and invalidate caches */
___unlock_dcache_all a2, a3
___invalidate_dcache_all a2, a3
___unlock_icache_all a2, a3
___invalidate_icache_all a2, a3
isync
/* Unpack data sections */
movi a2, __reloc_table_start
movi a3, __reloc_table_end
1: beq a2, a3, 3f # no more entries?
l32i a4, a2, 0 # start destination (in RAM)
l32i a5, a2, 4 # end destination (in RAM)
l32i a6, a2, 8 # start source (in ROM)
addi a2, a2, 12 # next entry
beq a4, a5, 1b # skip, empty entry
beq a4, a6, 1b # skip, source and destination are the same
/* If there's memory protection option with 512MB TLB regions and
* cache attributes in TLB entries and caching is not inhibited,
* enable data/instruction cache for relocated image.
*/
#if XCHAL_HAVE_SPANNING_WAY && \
(!defined(CONFIG_SYS_DCACHE_OFF) || \
!defined(CONFIG_SYS_ICACHE_OFF))
srli a7, a4, 29
slli a7, a7, 29
addi a7, a7, XCHAL_SPANNING_WAY
#ifndef CONFIG_SYS_DCACHE_OFF
rdtlb1 a8, a7
srli a8, a8, 4
slli a8, a8, 4
addi a8, a8, CA_WRITEBACK
wdtlb a8, a7
#endif
#ifndef CONFIG_SYS_ICACHE_OFF
ritlb1 a8, a7
srli a8, a8, 4
slli a8, a8, 4
addi a8, a8, CA_WRITEBACK
witlb a8, a7
#endif
isync
#endif
2: l32i a7, a6, 0
addi a6, a6, 4
s32i a7, a4, 0
addi a4, a4, 4
bltu a4, a5, 2b
j 1b
3: /* All code and initialized data segments have been copied */
/* Setup PS, PS.WOE = 1, PS.EXCM = 0, PS.INTLEVEL = EXCM level. */
#if __XTENSA_CALL0_ABI__
movi a2, XCHAL_EXCM_LEVEL
#else
movi a2, (1<<PS_WOE_BIT) | XCHAL_EXCM_LEVEL
#endif
wsr a2, PS
rsync
/* Writeback */
___flush_dcache_all a2, a3
#ifdef __XTENSA_WINDOWED_ABI__
/*
* In the windowed ABI, the caller and call target need to be within the same
* gigabyte. Put the rest of the code into the text segment and jump
* there.
*/
movi a4, .Lboard_init_code
jx a4
.text
.align 4
.Lboard_init_code:
#endif
movi a0, 0
movi sp, (CONFIG_SYS_TEXT_ADDR - 16) & 0xfffffff0
#ifdef CONFIG_DEBUG_UART
movi a4, debug_uart_init
#ifdef __XTENSA_CALL0_ABI__
callx0 a4
#else
callx4 a4
#endif
#endif
movi a4, board_init_f_alloc_reserve
#ifdef __XTENSA_CALL0_ABI__
mov a2, sp
callx0 a4
mov sp, a2
#else
mov a6, sp
callx4 a4
movsp sp, a6
#endif
movi a4, board_init_f_init_reserve
#ifdef __XTENSA_CALL0_ABI__
callx0 a4
#else
callx4 a4
#endif
/*
* Call board initialization routine (never returns).
*/
movi a4, board_init_f
#ifdef __XTENSA_CALL0_ABI__
movi a2, 0
callx0 a4
#else
movi a6, 0
callx4 a4
#endif
/* Never Returns */
ill
/*
* void relocate_code (addr_sp, gd, addr_moni)
*
* This "function" does not return, instead it continues in RAM
* after relocating the monitor code.
*
* a2 = addr_sp
* a3 = gd
* a4 = destination address
*/
.text
.globl relocate_code
.align 4
relocate_code:
abi_entry
#ifdef __XTENSA_CALL0_ABI__
mov a1, a2
mov a2, a3
mov a3, a4
movi a0, board_init_r
callx0 a0
#else
/* We can't movsp here, because the chain of stack frames may cross
* the now reserved memory. We need to toss all window frames except
* the current, create new pristine stack frame and start from scratch.
*/
rsr a0, windowbase
ssl a0
movi a0, 1
sll a0, a0
wsr a0, windowstart
rsync
movi a0, 0
/* Reserve 16-byte save area */
addi sp, a2, -16
mov a6, a3
mov a7, a4
movi a4, board_init_r
callx4 a4
#endif
ill
#if XCHAL_HAVE_EXCEPTIONS
/*
* Exception vectors.
*
* Various notes:
* - We currently don't use the user exception vector (PS.UM is always 0),
* but do define such a vector, just in case. They both jump to the
* same exception handler, though.
* - We currently only save the bare minimum number of registers:
* a0...a15, sar, loop-registers, exception registers (epc1, excvaddr,
* exccause, depc)
* - WINDOWSTART is only saved to identify if registers have been spilled
* to the wrong stack (exception stack) while executing the exception
* handler.
*/
.section .KernelExceptionVector.text, "ax"
.global _KernelExceptionVector
_KernelExceptionVector:
wsr a2, EXCSAVE1
movi a2, ExceptionHandler
jx a2
.section .UserExceptionVector.text, "ax"
.global _UserExceptionVector
_UserExceptionVector:
wsr a2, EXCSAVE1
movi a2, ExceptionHandler
jx a2
#if !XCHAL_HAVE_XEA1
.section .DoubleExceptionVector.text, "ax"
.global _DoubleExceptionVector
_DoubleExceptionVector:
#ifdef __XTENSA_CALL0_ABI__
wsr a0, EXCSAVE1
movi a0, hang # report and ask user to reset board
callx0 a0
#else
wsr a4, EXCSAVE1
movi a4, hang # report and ask user to reset board
callx4 a4
#endif
#endif
/* Does not return here */
.text
.align 4
ExceptionHandler:
rsr a2, EXCCAUSE # find handler
#if XCHAL_HAVE_WINDOWED
/* Special case for alloca handler */
bnei a2, 5, 1f # jump if not alloca exception
addi a1, a1, -16 - 4 # create a small stack frame
s32i a3, a1, 0 # and save a3 (a2 still in excsave1)
movi a2, fast_alloca_exception
jx a2 # jump to fast_alloca_exception
#endif
/* All other exceptions go here: */
/* Create ptrace stack and save a0...a3 */
1: addi a2, a1, - PT_SIZE - 16
s32i a0, a2, PT_AREG + 0 * 4
s32i a1, a2, PT_AREG + 1 * 4
s32i a3, a2, PT_AREG + 3 * 4
rsr a3, EXCSAVE1
s32i a3, a2, PT_AREG + 2 * 4
mov a1, a2
/* Save remaining AR registers */
s32i a4, a1, PT_AREG + 4 * 4
s32i a5, a1, PT_AREG + 5 * 4
s32i a6, a1, PT_AREG + 6 * 4
s32i a7, a1, PT_AREG + 7 * 4
s32i a8, a1, PT_AREG + 8 * 4
s32i a9, a1, PT_AREG + 9 * 4
s32i a10, a1, PT_AREG + 10 * 4
s32i a11, a1, PT_AREG + 11 * 4
s32i a12, a1, PT_AREG + 12 * 4
s32i a13, a1, PT_AREG + 13 * 4
s32i a14, a1, PT_AREG + 14 * 4
s32i a15, a1, PT_AREG + 15 * 4
/* Save SRs */
#if XCHAL_HAVE_WINDOWED
rsr a2, WINDOWSTART
s32i a2, a1, PT_WINDOWSTART
#endif
rsr a2, SAR
rsr a3, EPC1
rsr a4, EXCVADDR
s32i a2, a1, PT_SAR
s32i a3, a1, PT_PC
s32i a4, a1, PT_EXCVADDR
#if XCHAL_HAVE_LOOPS
movi a2, 0
rsr a3, LBEG
xsr a2, LCOUNT
s32i a3, a1, PT_LBEG
rsr a3, LEND
s32i a2, a1, PT_LCOUNT
s32i a3, a1, PT_LEND
#endif
/* Set up C environment and call registered handler */
/* Setup stack, PS.WOE = 1, PS.EXCM = 0, PS.INTLEVEL = EXCM level. */
rsr a2, EXCCAUSE
#if XCHAL_HAVE_XEA1
movi a3, (1<<PS_WOE_BIT) | 1
#elif __XTENSA_CALL0_ABI__
movi a3, XCHAL_EXCM_LEVEL
#else
movi a3, (1<<PS_WOE_BIT) | XCHAL_EXCM_LEVEL
#endif
xsr a3, PS
rsync
s32i a2, a1, PT_EXCCAUSE
s32i a3, a1, PT_PS
movi a0, exc_table
addx4 a0, a2, a0
l32i a0, a0, 0
#ifdef __XTENSA_CALL0_ABI__
mov a2, a1 # Provide stack frame as only argument
callx0 a0
l32i a3, a1, PT_PS
#else
mov a6, a1 # Provide stack frame as only argument
callx4 a0
#endif
/* Restore PS and go to exception mode (PS.EXCM=1) */
wsr a3, PS
/* Restore SR registers */
#if XCHAL_HAVE_LOOPS
l32i a2, a1, PT_LBEG
l32i a3, a1, PT_LEND
l32i a4, a1, PT_LCOUNT
wsr a2, LBEG
wsr a3, LEND
wsr a4, LCOUNT
#endif
l32i a2, a1, PT_SAR
l32i a3, a1, PT_PC
wsr a2, SAR
wsr a3, EPC1
#if XCHAL_HAVE_WINDOWED
/* Do we need to simulate a MOVSP? */
l32i a2, a1, PT_WINDOWSTART
addi a3, a2, -1
and a2, a2, a3
beqz a2, 1f # Skip if regs were spilled before exc.
rsr a2, WINDOWSTART
addi a3, a2, -1
and a2, a2, a3
bnez a2, 1f # Skip if registers aren't spilled now
addi a2, a1, -16
l32i a4, a2, 0
l32i a5, a2, 4
s32i a4, a1, PT_SIZE + 0
s32i a5, a1, PT_SIZE + 4
l32i a4, a2, 8
l32i a5, a2, 12
s32i a4, a1, PT_SIZE + 8
s32i a5, a1, PT_SIZE + 12
#endif
/* Restore address register */
1: l32i a15, a1, PT_AREG + 15 * 4
l32i a14, a1, PT_AREG + 14 * 4
l32i a13, a1, PT_AREG + 13 * 4
l32i a12, a1, PT_AREG + 12 * 4
l32i a11, a1, PT_AREG + 11 * 4
l32i a10, a1, PT_AREG + 10 * 4
l32i a9, a1, PT_AREG + 9 * 4
l32i a8, a1, PT_AREG + 8 * 4
l32i a7, a1, PT_AREG + 7 * 4
l32i a6, a1, PT_AREG + 6 * 4
l32i a5, a1, PT_AREG + 5 * 4
l32i a4, a1, PT_AREG + 4 * 4
l32i a3, a1, PT_AREG + 3 * 4
l32i a2, a1, PT_AREG + 2 * 4
l32i a0, a1, PT_AREG + 0 * 4
l32i a1, a1, PT_AREG + 1 * 4 # Remove ptrace stack frame
rfe
#endif /* XCHAL_HAVE_EXCEPTIONS */
#if XCHAL_HAVE_WINDOWED
/*
* Window overflow and underflow handlers.
* The handlers must be 64 bytes apart: for each call size (4, 8, and 12
* registers) the overflow handler is followed by the corresponding
* underflow handler, as laid out below.
*
* Note: We rerun the underflow handlers if we hit an exception, so
* we try to access any page that would cause a page fault early.
*/
.section .WindowVectors.text, "ax"
/* 4-Register Window Overflow Vector (Handler) */
.align 64
.global _WindowOverflow4
_WindowOverflow4:
s32e a0, a5, -16
s32e a1, a5, -12
s32e a2, a5, -8
s32e a3, a5, -4
rfwo
/* 4-Register Window Underflow Vector (Handler) */
.align 64
.global _WindowUnderflow4
_WindowUnderflow4:
l32e a0, a5, -16
l32e a1, a5, -12
l32e a2, a5, -8
l32e a3, a5, -4
rfwu
/*
* a0: a0
* a1: new stack pointer = a1 - 16 - 4
* a2: available, saved in excsave1
* a3: available, saved on stack *a1
*/
/* 15*/ .byte 0xff
fast_alloca_exception: /* must be at _WindowUnderflow4 + 16 */
/* 16*/ rsr a2, PS
/* 19*/ rsr a3, WINDOWBASE
/* 22*/ extui a2, a2, PS_OWB_SHIFT, PS_OWB_SHIFT
/* 25*/ xor a2, a2, a3
/* 28*/ rsr a3, PS
/* 31*/ slli a2, a2, PS_OWB_SHIFT
/* 34*/ xor a2, a3, a2
/* 37*/ wsr a2, PS
/* 40*/ _l32i a3, a1, 0
/* 43*/ addi a1, a1, 16 + 4
/* 46*/ rsr a2, EXCSAVE1
/* 49*/ rotw -1
/* 52*/ _bbci.l a4, 31, _WindowUnderflow4 /* 0x: call4 */
/* 55*/ rotw -1
/* 58*/ _bbci.l a8, 30, _WindowUnderflow8 /* 10: call8 */
/* 61*/ _j __WindowUnderflow12 /* 11: call12 */
/* 64*/
/* 8-Register Window Overflow Vector (Handler) */
.align 64
.global _WindowOverflow8
_WindowOverflow8:
s32e a0, a9, -16
l32e a0, a1, -12
s32e a2, a9, -8
s32e a1, a9, -12
s32e a3, a9, -4
s32e a4, a0, -32
s32e a5, a0, -28
s32e a6, a0, -24
s32e a7, a0, -20
rfwo
/* 8-Register Window Underflow Vector (Handler) */
.align 64
.global _WindowUnderflow8
_WindowUnderflow8:
l32e a1, a9, -12
l32e a0, a9, -16
l32e a7, a1, -12
l32e a2, a9, -8
l32e a4, a7, -32
l32e a3, a9, -4
l32e a5, a7, -28
l32e a6, a7, -24
l32e a7, a7, -20
rfwu
/* 12-Register Window Overflow Vector (Handler) */
.align 64
.global _WindowOverflow12
_WindowOverflow12:
s32e a0, a13, -16
l32e a0, a1, -12
s32e a1, a13, -12
s32e a2, a13, -8
s32e a3, a13, -4
s32e a4, a0, -48
s32e a5, a0, -44
s32e a6, a0, -40
s32e a7, a0, -36
s32e a8, a0, -32
s32e a9, a0, -28
s32e a10, a0, -24
s32e a11, a0, -20
rfwo
/* 12-Register Window Underflow Vector (Handler) */
.org _WindowOverflow12 + 64 - 3
__WindowUnderflow12:
rotw -1
.global _WindowUnderflow12
_WindowUnderflow12:
l32e a1, a13, -12
l32e a0, a13, -16
l32e a11, a1, -12
l32e a2, a13, -8
l32e a4, a11, -48
l32e a8, a11, -32
l32e a3, a13, -4
l32e a5, a11, -44
l32e a6, a11, -40
l32e a7, a11, -36
l32e a9, a11, -28
l32e a10, a11, -24
l32e a11, a11, -20
rfwu
#endif /* XCHAL_HAVE_WINDOWED */

arch/xtensa/cpu/u-boot.lds
@@ -0,0 +1,116 @@
/*
* (C) Copyright 2008 - 2013 Tensilica, Inc.
* (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <config.h>
#include <asm/ldscript.h>
#include <asm/arch/core.h>
#include <asm/addrspace.h>
#include <asm-offsets.h>
OUTPUT_ARCH(xtensa)
ENTRY(_start)
/*
* U-Boot resets from SYSROM and unpacks itself from a ROM store to RAM.
* The reset vector is usually near the base of SYSROM and has room
* above it for the ROM store into which the rest of U-Boot is packed.
* The ROM store also needs to be above any other vectors that are in ROM.
* If a core has its vectors near the top of ROM, this must be edited.
*
* Note that to run C code out of ROM, the processor would have to support
* 'relocatable' exception vectors and provide a scratch memory for the
* initial stack. Not all Xtensa processor configurations support that, so
* we can simplify the boot process and unpack U-Boot to RAM immediately.
* This, however, requires that memory has been initialized through some
* other means (serial ROM, for example) or is initialized early (requiring
* an assembler function; see start.S for more details).
*/
SECTIONS
{
. = + SIZEOF_HEADERS;
SECTION_ResetVector(XCHAL_RESET_VECTOR_VADDR, LMA_EQ_VMA)
.reloc_table ALIGN(4) : FOLLOWING(.ResetVector.text)
{
__reloc_table_start = ABSOLUTE(.);
#if XCHAL_HAVE_WINDOWED
RELOCATE2(WindowVectors,text);
#endif
RELOCATE2(KernelExceptionVector,literal);
RELOCATE2(KernelExceptionVector,text);
RELOCATE2(UserExceptionVector,literal);
RELOCATE2(UserExceptionVector,text);
RELOCATE2(DoubleExceptionVector,literal);
RELOCATE2(DoubleExceptionVector,text);
RELOCATE1(text);
RELOCATE1(rodata);
RELOCATE1(data);
RELOCATE1(u_boot_list);
__reloc_table_end = ABSOLUTE(.);
}
#if XCHAL_HAVE_WINDOWED
SECTION_VECTOR(WindowVectors,text,XCHAL_WINDOW_VECTORS_VADDR,
FOLLOWING(.reloc_table))
SECTION_VECTOR(KernelExceptionVector,literal,XCHAL_KERNEL_VECTOR_VADDR-8,
FOLLOWING(.WindowVectors.text))
#else
SECTION_VECTOR(KernelExceptionVector,literal,XCHAL_KERNEL_VECTOR_VADDR-8,
FOLLOWING(.reloc_table))
#endif
SECTION_VECTOR(KernelExceptionVector,text,XCHAL_KERNEL_VECTOR_VADDR,
FOLLOWING(.KernelExceptionVector.literal))
SECTION_VECTOR(UserExceptionVector,literal,XCHAL_USER_VECTOR_VADDR-8,
FOLLOWING(.KernelExceptionVector.text))
SECTION_VECTOR(UserExceptionVector,text,XCHAL_USER_VECTOR_VADDR,
FOLLOWING(.UserExceptionVector.literal))
SECTION_VECTOR(DoubleExceptionVector,literal,XCHAL_DOUBLEEXC_VECTOR_VADDR-8,
FOLLOWING(.UserExceptionVector.text))
SECTION_VECTOR(DoubleExceptionVector,text,XCHAL_DOUBLEEXC_VECTOR_VADDR,
FOLLOWING(.DoubleExceptionVector.literal))
__monitor_start = CONFIG_SYS_TEXT_ADDR;
SECTION_text(CONFIG_SYS_TEXT_ADDR, FOLLOWING(.DoubleExceptionVector.text))
SECTION_rodata(ALIGN(16), FOLLOWING(.text))
SECTION_u_boot_list(ALIGN(16), FOLLOWING(.rodata))
SECTION_data(ALIGN(16), FOLLOWING(.u_boot_list))
__reloc_end = .;
__init_end = .;
SECTION_bss(__init_end (OVERLAY),)
__monitor_end = .;
/*
* On many Xtensa boards a region of RAM may be mapped to the ROM address
* space to facilitate on-chip-debug, and U-Boot must fit within that region.
* The config variables CONFIG_SYS_MONITOR_* define the region.
* If U-Boot extends beyond this region it will appear discontiguous in the
* address space and is in danger of overwriting itself during unpacking
* ("relocation").
* This causes U-Boot to crash in a way that is difficult to debug. On some
* boards (such as xtav60) the region is small enough that U-Boot will not
* fit if compiled entirely with -O0 (a common scenario). To avoid a lengthy
* debugging session when this happens, ensure a link-time error occurs.
*
*/
ASSERT(__monitor_end - __monitor_start <= CONFIG_SYS_MONITOR_LEN,
"U-Boot ROM image is too large. Check optimization level.")
SECTION_xtensa
SECTION_debug
/DISCARD/ : { *(.dynstr*) }
/DISCARD/ : { *(.hash*) }
/DISCARD/ : { *(.interp) }
/DISCARD/ : { *(.got*) }
/DISCARD/ : { *(.dynsym) }
}

arch/xtensa/dts/Makefile
@@ -0,0 +1,13 @@
#
# SPDX-License-Identifier: GPL-2.0+
#
targets += $(dtb-y)
DTC_FLAGS +=
PHONY += dtbs
dtbs: $(addprefix $(obj)/, $(dtb-y))
@:
clean-files := *.dtb

arch/xtensa/dts/include/dt-bindings
@@ -0,0 +1 @@
../../../../include/dt-bindings

arch/xtensa/include/asm/addrspace.h
@@ -0,0 +1,31 @@
/*
* Copyright (C) 2008-2013 Tensilica Inc.
* Copyright (C) 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_ADDRSPACE_H
#define _XTENSA_ADDRSPACE_H
#include <asm/arch/core.h>
/*
* MMU Memory Map
*
* noMMU and v3 MMU have identity mapped address space on reset.
* V2 MMU:
* IO (uncached) f0000000..ffffffff -> f0000000
* IO (cached) e0000000..efffffff -> f0000000
* MEM (uncached) d8000000..dfffffff -> 00000000
* MEM (cached) d0000000..d7ffffff -> 00000000
*
* The actual location of memory and IO is the board property.
*/
#define IOADDR(x) (CONFIG_SYS_IO_BASE + (x))
#define MEMADDR(x) (CONFIG_SYS_MEMORY_BASE + (x))
#define PHYSADDR(x) ((x) - XCHAL_VECBASE_RESET_VADDR + \
XCHAL_VECBASE_RESET_PADDR)
#endif /* _XTENSA_ADDRSPACE_H */
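
As a quick illustration of the two mapping macros (the base addresses below are assumed board values, chosen to match the V2 MMU map in the comment above):

/* assumed board configuration, for illustration only */
#define CONFIG_SYS_MEMORY_BASE	0xd0000000
#define CONFIG_SYS_IO_BASE	0xf0000000
#include <asm/addrspace.h>

/* MEMADDR(0x1000)  == 0xd0001000 -- offset 0x1000 in the cached RAM view    */
/* IOADDR(0x50000)  == 0xf0050000 -- offset 0x50000 in the uncached IO view  */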

arch/xtensa/include/asm/asmmacro.h
@@ -0,0 +1,152 @@
/*
* Copyright (C) 2005 - 2013 Tensilica Inc.
* Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_ASMMACRO_H
#define _XTENSA_ASMMACRO_H
#include <asm/arch/core.h>
/*
* Function entry and return macros for supported ABIs.
*/
#if defined(__XTENSA_WINDOWED_ABI__)
#define abi_entry entry sp, 16
#define abi_ret retw
#elif defined(__XTENSA_CALL0_ABI__)
#define abi_entry
#define abi_ret ret
#else
#error Unsupported Xtensa ABI
#endif
/*
* Some little helpers for loops. Use zero-overhead-loops
* where applicable and if supported by the processor.
*
* __loopi ar, at, size, incr
* ar register initialized with the start address
* at scratch register used by macro
* size size immediate value
* incr increment
*
* __loops ar, as, at, inc_log2[, mask_log2][, cond][, ncond]
* ar register initialized with the start address
* as register initialized with the size
* at scratch register used by macro
* inc_log2 increment [in log2]
* mask_log2 mask [in log2]
* cond true condition (used in loop'cond')
* ncond false condition (used in b'ncond')
*
* __loop as
* restart loop. 'as' register must not have been modified!
*
* __endla ar, as, incr
* ar start address (modified)
* as scratch register used by __loops/__loopi macros or
* end address used by __loopt macro
* incr increment
*/
#if XCHAL_HAVE_LOOPS
.macro __loopi ar, at, size, incr
movi \at, ((\size + \incr - 1) / (\incr))
loop \at, 99f
.endm
.macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
.ifgt \incr_log2 - 1
addi \at, \as, (1 << \incr_log2) - 1
.ifnc \mask_log2,
extui \at, \at, \incr_log2, \mask_log2
.else
srli \at, \at, \incr_log2
.endif
.endif
loop\cond \at, 99f
.endm
.macro __loopt ar, as, at, incr_log2
sub \at, \as, \ar
.ifgt \incr_log2 - 1
addi \at, \at, (1 << \incr_log2) - 1
srli \at, \at, \incr_log2
.endif
loop \at, 99f
.endm
.macro __loop as
loop \as, 99f
.endm
.macro __endl ar, as
99:
.endm
#else
.macro __loopi ar, at, size, incr
movi \at, ((\size + \incr - 1) / (\incr))
addi \at, \ar, \size
98:
.endm
.macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
.ifnc \mask_log2,
extui \at, \as, \incr_log2, \mask_log2
.else
.ifnc \ncond,
srli \at, \as, \incr_log2
.endif
.endif
.ifnc \ncond,
b\ncond \at, 99f
.endif
.ifnc \mask_log2,
slli \at, \at, \incr_log2
add \at, \ar, \at
.else
add \at, \ar, \as
.endif
98:
.endm
.macro __loopt ar, as, at, incr_log2
98:
.endm
.macro __loop as
98:
.endm
.macro __endl ar, as
bltu \ar, \as, 98b
99:
.endm
#endif
.macro __endla ar, as, incr
addi \ar, \ar, \incr
__endl \ar \as
.endm
#endif /* _XTENSA_ASMMACRO_H */

arch/xtensa/include/asm/atomic.h
@@ -0,0 +1,55 @@
/*
* Copyright (C) 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H
#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) ((v)->counter = (i))
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
v->counter += i;
local_irq_restore(flags);
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
v->counter -= i;
local_irq_restore(flags);
}
static inline void atomic_inc(atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
++v->counter;
local_irq_restore(flags);
}
static inline void atomic_dec(atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
--v->counter;
local_irq_restore(flags);
}
#endif

arch/xtensa/include/asm/bitops.h
@@ -0,0 +1,36 @@
/*
* Copyright (C) 2001 - 2012 Tensilica Inc.
* Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_BITOPS_H
#define _XTENSA_BITOPS_H
#include <asm/system.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>
static inline int test_bit(int nr, const void *addr)
{
return ((unsigned char *)addr)[nr >> 3] & (1u << (nr & 7));
}
static inline int test_and_set_bit(int nr, volatile void *addr)
{
unsigned long flags;
unsigned char tmp;
unsigned char mask = 1u << (nr & 7);
local_irq_save(flags);
tmp = ((unsigned char *)addr)[nr >> 3];
((unsigned char *)addr)[nr >> 3] |= mask;
local_irq_restore(flags);
return tmp & mask;
}
#endif /* _XTENSA_BITOPS_H */

arch/xtensa/include/asm/bootparam.h
@@ -0,0 +1,54 @@
/*
* Definition of the Linux/Xtensa boot parameter structure
*
* Copyright (C) 2001 - 2009 Tensilica Inc.
*
* (Concept borrowed from the 68K port)
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_BOOTPARAM_H
#define _XTENSA_BOOTPARAM_H
#define BP_VERSION 0x0001
#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console */
#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
#define BP_TAG_FDT 0x1006 /* flat device tree */
#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
#define BP_TAG_LAST 0x7E0B /* last tag */
#ifndef __ASSEMBLY__
/* All records are aligned to 4 bytes */
struct bp_tag {
unsigned short id; /* tag id */
unsigned short size; /* size of this record excluding the structure */
unsigned long data[0]; /* data */
};
#define bp_tag_next(tag) \
((struct bp_tag *)((unsigned long)((tag) + 1) + (tag)->size))
struct meminfo {
unsigned long type;
unsigned long start;
unsigned long end;
};
#define MEMORY_TYPE_CONVENTIONAL 0x1000
#define MEMORY_TYPE_NONE 0x2000
struct sysmem_info {
int nr_banks;
struct meminfo bank[0];
};
#endif
#endif
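
For reference, a minimal sketch (not part of this patch) of how a consumer might walk a tag list built with these definitions, assuming params points at the BP_TAG_FIRST record:

#include <common.h>
#include <asm/bootparam.h>

static void dump_bp_tags(struct bp_tag *params)
{
	struct bp_tag *tag;

	/* the list begins with BP_TAG_FIRST and is terminated by BP_TAG_LAST */
	for (tag = params; tag->id != BP_TAG_LAST; tag = bp_tag_next(tag))
		printf("tag 0x%04x, %u data bytes\n", tag->id, tag->size);
}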

arch/xtensa/include/asm/byteorder.h
@@ -0,0 +1,83 @@
/*
* Based on Linux/Xtensa kernel version
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_BYTEORDER_H
#define _XTENSA_BYTEORDER_H
#include <asm/types.h>
static inline __attribute__((const)) __u32 ___arch__swab32(__u32 x)
{
__u32 res;
/* instruction sequence from Xtensa ISA release 2/2000 */
__asm__("ssai 8\n\t"
"srli %0, %1, 16\n\t"
"src %0, %0, %1\n\t"
"src %0, %0, %0\n\t"
"src %0, %1, %0\n"
: "=&a" (res)
: "a" (x)
);
return res;
}
static inline __attribute__((const)) __u16 ___arch__swab16(__u16 x)
{
/*
* Given that 'short' values are signed (i.e., can be negative),
* we cannot assume that the upper 16-bits of the register are
* zero. We are careful to mask values after shifting.
*/
/*
* There exists an anomaly between xt-gcc and xt-xcc. xt-gcc
* inserts an extui instruction after putting this function inline
* to ensure that it uses only the least-significant 16 bits of
* the result. xt-xcc doesn't use an extui, but assumes the
* __asm__ macro follows convention that the upper 16 bits of an
* 'unsigned short' result are still zero. This macro doesn't
* follow convention; indeed, it leaves garbage in the upper 16
* bits of the register.
*
* Declaring the temporary variables 'res' and 'tmp' to be 32-bit
* types while the return type of the function is a 16-bit type
* forces both compilers to insert exactly one extui instruction
* (or equivalent) to mask off the upper 16 bits.
*/
__u32 res;
__u32 tmp;
__asm__("extui %1, %2, 8, 8\n\t"
"slli %0, %2, 8\n\t"
"or %0, %0, %1\n"
: "=&a" (res), "=&a" (tmp)
: "a" (x)
);
return res;
}
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif
#ifdef __XTENSA_EL__
# include <linux/byteorder/little_endian.h>
#elif defined(__XTENSA_EB__)
# include <linux/byteorder/big_endian.h>
#else
# error processor byte order undefined!
#endif
#endif /* _XTENSA_BYTEORDER_H */
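
A small illustration of the helpers above (not part of this patch); the results follow directly from reversing the byte order:

#include <common.h>
#include <asm/byteorder.h>

void swab_demo(void)
{
	__u16 h = ___arch__swab16(0x1234);	/* yields 0x3412 */
	__u32 w = ___arch__swab32(0x12345678);	/* yields 0x78563412 */

	printf("%04x %08x\n", h, w);
}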

arch/xtensa/include/asm/cache.h
@@ -0,0 +1,25 @@
/*
* Copyright (C) 2009 Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_CACHE_H
#define _XTENSA_CACHE_H
#include <asm/arch/core.h>
#define ARCH_DMA_MINALIGN XCHAL_DCACHE_LINESIZE
#ifndef __ASSEMBLY__
void __flush_dcache_all(void);
void __flush_invalidate_dcache_range(unsigned long addr, unsigned long size);
void __invalidate_dcache_all(void);
void __invalidate_dcache_range(unsigned long addr, unsigned long size);
void __invalidate_icache_all(void);
void __invalidate_icache_range(unsigned long addr, unsigned long size);
#endif
#endif /* _XTENSA_CACHE_H */

arch/xtensa/include/asm/cacheasm.h
@@ -0,0 +1,211 @@
/*
* Copyright (C) 2006 Tensilica Inc.
* Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_CACHEASM_H
#define _XTENSA_CACHEASM_H
#include <asm/cache.h>
#include <asm/asmmacro.h>
#include <linux/stringify.h>
#define PAGE_SIZE 4096
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
/*
* Define cache functions as macros here so that they can be used
* by the kernel and boot loader. We should consider moving them to a
* library that can be linked by both.
*
* Locking
*
* ___unlock_dcache_all
* ___unlock_icache_all
*
* Flush and invalidate
*
* ___flush_invalidate_dcache_{all|range|page}
* ___flush_dcache_{all|range|page}
* ___invalidate_dcache_{all|range|page}
* ___invalidate_icache_{all|range|page}
*
*/
.macro __loop_cache_all ar at insn size line_width
movi \ar, 0
__loopi \ar, \at, \size, (4 << (\line_width))
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.endm
.macro __loop_cache_range ar as at insn line_width
extui \at, \ar, 0, \line_width
add \as, \as, \at
__loops \ar, \as, \at, \line_width
\insn \ar, 0
__endla \ar, \at, (1 << (\line_width))
.endm
.macro __loop_cache_page ar at insn line_width
__loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.endm
.macro ___unlock_dcache_all ar at
#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___unlock_icache_all ar at
#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
#endif
.endm
.macro ___flush_invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___flush_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___invalidate_icache_all ar at
#if XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
XCHAL_ICACHE_LINEWIDTH
#endif
.endm
.macro ___flush_invalidate_dcache_range ar as at
#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___flush_dcache_range ar as at
#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___invalidate_dcache_range ar as at
#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___invalidate_icache_range ar as at
#if XCHAL_ICACHE_SIZE
__loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
#endif
.endm
.macro ___flush_invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___flush_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___invalidate_icache_page ar as
#if XCHAL_ICACHE_SIZE
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
#endif
.endm
#endif /* _XTENSA_CACHEASM_H */

arch/xtensa/include/asm/config.h
@@ -0,0 +1,24 @@
/*
* Copyright (C) 2009 Tensilica Inc.
* Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _ASM_CONFIG_H_
#define _ASM_CONFIG_H_
#include <asm/arch/core.h>
#define CONFIG_LMB
/*
* Make boot parameters available in the MMUv2 virtual memory layout by
* restricting used physical memory to the first 128MB.
*/
#if XCHAL_HAVE_PTP_MMU
#define CONFIG_VERY_BIG_RAM
#define CONFIG_MAX_MEM_MAPPED (128 << 20)
#endif
#endif

arch/xtensa/include/asm/errno.h
@@ -0,0 +1 @@
#include <asm-generic/errno.h>

arch/xtensa/include/asm/global_data.h
@@ -0,0 +1,20 @@
/*
* (C) Copyright 2007, Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_GBL_DATA_H
#define _XTENSA_GBL_DATA_H
/* Architecture-specific global data */
struct arch_global_data {
unsigned long cpu_clk;
};
#include <asm-generic/global_data.h>
#define DECLARE_GLOBAL_DATA_PTR extern gd_t *gd
#endif /* _XTENSA_GBL_DATA_H */

arch/xtensa/include/asm/io.h
@@ -0,0 +1,148 @@
/*
* IO header file
*
* Copyright (C) 2001-2007 Tensilica Inc.
* Based on the Linux/Xtensa version of this header.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_IO_H
#define _XTENSA_IO_H
#include <linux/types.h>
#include <asm/byteorder.h>
/*
* swap functions to change byte order from little-endian to big-endian and
* vice versa.
*/
static inline unsigned short _swapw(unsigned short v)
{
return (v << 8) | (v >> 8);
}
static inline unsigned int _swapl(unsigned int v)
{
return (v << 24) | ((v & 0xff00) << 8) |
((v >> 8) & 0xff00) | (v >> 24);
}
/*
* Generic I/O
*/
#define readb(addr) \
({ unsigned char __v = (*(volatile unsigned char *)(addr)); __v; })
#define readw(addr) \
({ unsigned short __v = (*(volatile unsigned short *)(addr)); __v; })
#define readl(addr) \
({ unsigned int __v = (*(volatile unsigned int *)(addr)); __v; })
#define writeb(b, addr) (void)((*(volatile unsigned char *)(addr)) = (b))
#define writew(b, addr) (void)((*(volatile unsigned short *)(addr)) = (b))
#define writel(b, addr) (void)((*(volatile unsigned int *)(addr)) = (b))
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
/* These are the definitions for the x86 IO instructions
* inb/inw/inl/outb/outw/outl, the "string" versions
* insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
* inb_p/inw_p/...
* The macros don't do byte-swapping.
*/
#define inb(port) readb((u8 *)((port)))
#define outb(val, port) writeb((val), (u8 *)((unsigned long)(port)))
#define inw(port) readw((u16 *)((port)))
#define outw(val, port) writew((val), (u16 *)((unsigned long)(port)))
#define inl(port) readl((u32 *)((port)))
#define outl(val, port) writel((val), (u32 *)((unsigned long)(port)))
#define inb_p(port) inb((port))
#define outb_p(val, port) outb((val), (port))
#define inw_p(port) inw((port))
#define outw_p(val, port) outw((val), (port))
#define inl_p(port) inl((port))
#define outl_p(val, port) outl((val), (port))
void insb(unsigned long port, void *dst, unsigned long count);
void insw(unsigned long port, void *dst, unsigned long count);
void insl(unsigned long port, void *dst, unsigned long count);
void outsb(unsigned long port, const void *src, unsigned long count);
void outsw(unsigned long port, const void *src, unsigned long count);
void outsl(unsigned long port, const void *src, unsigned long count);
#define IO_SPACE_LIMIT ~0
#define memset_io(a, b, c) memset((void *)(a), (b), (c))
#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
/* At this point Xtensa doesn't provide byte-swap instructions */
#ifdef __XTENSA_EB__
# define in_8(addr) (*(u8 *)(addr))
# define in_le16(addr) _swapw(*(u16 *)(addr))
# define in_le32(addr) _swapl(*(u32 *)(addr))
# define out_8(b, addr) *(u8 *)(addr) = (b)
# define out_le16(b, addr) *(u16 *)(addr) = _swapw(b)
# define out_le32(b, addr) *(u32 *)(addr) = _swapl(b)
#elif defined(__XTENSA_EL__)
# define in_8(addr) (*(u8 *)(addr))
# define in_le16(addr) (*(u16 *)(addr))
# define in_le32(addr) (*(u32 *)(addr))
# define out_8(b, addr) *(u8 *)(addr) = (b)
# define out_le16(b, addr) *(u16 *)(addr) = (b)
# define out_le32(b, addr) *(u32 *)(addr) = (b)
#else
# error processor byte order undefined!
#endif
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#define MAP_NOCACHE (0)
#define MAP_WRCOMBINE (0)
#define MAP_WRBACK (0)
#define MAP_WRTHROUGH (0)
static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
return (void *)paddr;
}
/*
* Take down a mapping set up by map_physmem().
*/
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}
static inline phys_addr_t virt_to_phys(void *vaddr)
{
return (phys_addr_t)((unsigned long)vaddr);
}
/*
* Dummy function to keep U-Boot's cfi_flash.c driver happy.
*/
static inline void sync(void)
{
}
#endif /* _XTENSA_IO_H */
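
Typical use of these accessors looks like the sketch below; the register address and status bit are assumed values, purely for illustration:

#include <asm/io.h>

#define DEMO_STATUS_REG	0xfd050014	/* assumed MMIO address */
#define DEMO_READY_BIT	0x00000001	/* assumed status bit */

static int demo_ready(void)
{
	/* plain volatile 32-bit access; no byte swapping is performed */
	return (readl(DEMO_STATUS_REG) & DEMO_READY_BIT) != 0;
}

static void demo_ack(void)
{
	writel(DEMO_READY_BIT, DEMO_STATUS_REG);
}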

arch/xtensa/include/asm/ldscript.h
@@ -0,0 +1,222 @@
/*
* (C) Copyright 2007 Tensilica, Inc.
* (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_LDSCRIPT_H
#define _XTENSA_LDSCRIPT_H
/*
* This linker script is pre-processed with CPP to avoid hard-coding
* addresses that depend on the Xtensa core configuration, because
* this FPGA board can be used with a huge variety of Xtensa cores.
*/
#include <asm/arch/core.h>
#include <asm/addrspace.h>
#define ALIGN_LMA 4
#define LMA_EQ_VMA
#define FORCE_OUTPUT . = .
#define FOLLOWING(sec) \
AT(((LOADADDR(sec) + SIZEOF(sec) + ALIGN_LMA-1)) & ~(ALIGN_LMA-1))
/*
* Specify an output section that will be added to the ROM store table
* (PACKED_SECTION) or one that will be resident in ROM (RESIDENT_SECTION).
* 'symname' is a base name for section boundary symbols *_start & *_end.
* 'lma' is the load address at which a section will be packed in ROM.
* 'region' is the basename identifying a memory region and program header.
* 'keep' prevents removal of empty sections (must be 'KEEP' or 'NOKEEP').
*/
#define RELOCATE1(_sec_) \
LONG(_##_sec_##_start); \
LONG(_##_sec_##_end); \
LONG(LOADADDR(.##_sec_));
#define RELOCATE2(_sym_, _sec_) \
LONG(_##_sym_##_##_sec_##_start); \
LONG(_##_sym_##_##_sec_##_end); \
LONG(LOADADDR(.##_sym_##.##_sec_));
#define SECTION_VECTOR(_sym_, _sec_, _vma_, _lma_) \
.##_sym_##.##_sec_ _vma_ : _lma_ \
{ \
. = ALIGN(4); \
_##_sym_##_##_sec_##_start = ABSOLUTE(.); \
KEEP(*(.##_sym_##.##_sec_)) \
_##_sym_##_##_sec_##_end = ABSOLUTE(.); \
}
/* In MMU configs there are two aliases of SYSROM, cached and uncached.
* For various reasons it is simpler to use the uncached mapping for load
* addresses, so ROM sections end up contiguous with the reset vector and
* we get a compact binary image. However we can gain performance by doing
* the unpacking from the cached ROM mapping. So we adjust all the load
* addresses in the ROM store table with an offset to the cached mapping,
* including the symbols referring to the ROM store table itself.
*/
#define SECTION_ResetVector(_vma_, _lma_) \
.ResetVector.text _vma_ : _lma_ \
{ \
FORCE_OUTPUT; \
KEEP(*(.ResetVector.text)); \
KEEP(*(.reset.literal .reset.text)) \
}
#define SECTION_text(_vma_, _lma_) \
.text _vma_ : _lma_ \
{ \
_text_start = ABSOLUTE(.); \
*(.literal .text) \
*(.literal.* .text.* .stub) \
*(.gnu.warning .gnu.linkonce.literal.*) \
*(.gnu.linkonce.t.*.literal .gnu.linkonce.t.*) \
*(.fini.literal) \
*(.fini) \
*(.gnu.version) \
_text_end = ABSOLUTE(.); \
}
#define SECTION_rodata(_vma_, _lma_) \
.rodata _vma_ : _lma_ \
{ \
_rodata_start = ABSOLUTE(.); \
*(.rodata) \
*(.rodata.*) \
*(.dtb.init.rodata) \
*(.gnu.linkonce.r.*) \
*(.rodata1) \
__XT_EXCEPTION_TABLE__ = ABSOLUTE(.); \
*(.xt_except_table) \
*(.gcc_except_table) \
*(.gnu.linkonce.e.*) \
*(.gnu.version_r) \
. = ALIGN(16); \
_rodata_end = ABSOLUTE(.); \
}
#define SECTION_u_boot_list(_vma_, _lma_) \
.u_boot_list _vma_ : _lma_ \
{ \
_u_boot_list_start = ABSOLUTE(.); \
KEEP(*(SORT(.u_boot_list*))); \
_u_boot_list_end = ABSOLUTE(.); \
}
#define SECTION_data(_vma_, _lma_) \
.data _vma_ : _lma_ \
{ \
_data_start = ABSOLUTE(.); \
*(.data) \
*(.data.*) \
*(.gnu.linkonce.d.*) \
*(.data1) \
*(.sdata) \
*(.sdata.*) \
*(.gnu.linkonce.s.*) \
*(.sdata2) \
*(.sdata2.*) \
*(.gnu.linkonce.s2.*) \
*(.jcr) \
*(.eh_frame) \
*(.dynamic) \
*(.gnu.version_d) \
_data_end = ABSOLUTE(.); \
}
#define SECTION_lit4(_vma_, _lma_) \
.lit4 _vma_ : _lma_ \
{ \
_lit4_start = ABSOLUTE(.); \
*(*.lit4) \
*(.gnu.linkonce.lit4.*) \
_lit4_end = ABSOLUTE(.); \
}
#define SECTION_bss(_vma_, _lma_) \
.bss _vma_ : _lma_ \
{ \
. = ALIGN(8); \
_bss_start = ABSOLUTE(.); \
__bss_start = ABSOLUTE(.); \
*(.dynsbss) \
*(.sbss) \
*(.sbss.*) \
*(.gnu.linkonce.sb.*) \
*(.scommon) \
*(.sbss2) \
*(.sbss2.*) \
*(.gnu.linkonce.sb2.*) \
*(.dynbss) \
*(.bss) \
*(.bss.*) \
*(.gnu.linkonce.b.*) \
*(COMMON) \
*(.sram.bss) \
. = ALIGN(8); \
_bss_end = ABSOLUTE(.); \
__bss_end = ABSOLUTE(.); \
_end = ALIGN(0x8); \
PROVIDE(end = ALIGN(0x8)); \
_stack_sentry = ALIGN(0x8); \
}
#define SECTION_debug \
.debug 0 : { *(.debug) } \
.line 0 : { *(.line) } \
.debug_srcinfo 0 : { *(.debug_srcinfo) } \
.debug_sfnames 0 : { *(.debug_sfnames) } \
.debug_aranges 0 : { *(.debug_aranges) } \
.debug_pubnames 0 : { *(.debug_pubnames) } \
.debug_info 0 : { *(.debug_info) } \
.debug_abbrev 0 : { *(.debug_abbrev) } \
.debug_line 0 : { *(.debug_line) } \
.debug_frame 0 : { *(.debug_frame) } \
.debug_str 0 : { *(.debug_str) } \
.debug_loc 0 : { *(.debug_loc) } \
.debug_macinfo 0 : { *(.debug_macinfo) } \
.debug_weaknames 0 : { *(.debug_weaknames) } \
.debug_funcnames 0 : { *(.debug_funcnames) } \
.debug_typenames 0 : { *(.debug_typenames) } \
.debug_varnames 0 : { *(.debug_varnames) }
#define SECTION_xtensa \
.xt.insn 0 : \
{ \
KEEP (*(.xt.insn)) \
KEEP (*(.gnu.linkonce.x.*)) \
} \
.xt.prop 0 : \
{ \
KEEP (*(.xt.prop)) \
KEEP (*(.xt.prop.*)) \
KEEP (*(.gnu.linkonce.prop.*)) \
} \
.xt.lit 0 : \
{ \
KEEP (*(.xt.lit)) \
KEEP (*(.xt.lit.*)) \
KEEP (*(.gnu.linkonce.p.*)) \
} \
.xt.profile_range 0 : \
{ \
KEEP (*(.xt.profile_range)) \
KEEP (*(.gnu.linkonce.profile_range.*)) \
} \
.xt.profile_ranges 0 : \
{ \
KEEP (*(.xt.profile_ranges)) \
KEEP (*(.gnu.linkonce.xt.profile_ranges.*)) \
} \
.xt.profile_files 0 : \
{ \
KEEP (*(.xt.profile_files)) \
KEEP (*(.gnu.linkonce.xt.profile_files.*)) \
}
#endif /* _XTENSA_LDSCRIPT_H */

arch/xtensa/include/asm/linkage.h
@@ -0,0 +1,4 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#endif

arch/xtensa/include/asm/misc.h
@@ -0,0 +1,20 @@
/*
* (C) Copyright 2008, Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*
********************************************************************
* NOTE: This header file defines an interface to U-Boot. Including
* this (unmodified) header file in another file is considered normal
* use of U-Boot, and does *not* fall under the heading of "derived
* work".
********************************************************************
*/
#ifndef _XTENSA_MISC_H
#define _XTENSA_MISC_H
/* Used in cpu/xtensa/cpu.c */
void board_reset(void);
#endif /* _XTENSA_MISC_H */

arch/xtensa/include/asm/posix_types.h
@@ -0,0 +1,74 @@
/*
* Copyright (C) 2007, Tensilica Inc.
*
* Based on the ARM version: Copyright (C) 1996-1998 Russell King.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_POSIX_TYPES_H
#define _XTENSA_POSIX_TYPES_H
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
typedef unsigned short __kernel_dev_t;
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif
typedef struct {
#if defined(__KERNEL__) || defined(__USE_ALL)
int val[2];
#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
int __val[2];
#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
} __kernel_fsid_t;
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
#undef __FD_SET
#define __FD_SET(fd, fdsetp) \
(((fd_set *)fdsetp)->fds_bits[fd >> 5] |= (1<<(fd & 31)))
#undef __FD_CLR
#define __FD_CLR(fd, fdsetp) \
(((fd_set *)fdsetp)->fds_bits[fd >> 5] &= ~(1<<(fd & 31)))
#undef __FD_ISSET
#define __FD_ISSET(fd, fdsetp) \
((((fd_set *)fdsetp)->fds_bits[fd >> 5] & (1<<(fd & 31))) != 0)
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) \
(memset(fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
#endif
#endif /* _XTENSA_POSIX_TYPES_H */

arch/xtensa/include/asm/processor.h
@@ -0,0 +1,11 @@
/*
* Copyright (C) 1997 Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_PROCESSOR_H
#define _XTENSA_PROCESSOR_H
#endif /* _XTENSA_PROCESSOR_H */

arch/xtensa/include/asm/ptrace.h
@@ -0,0 +1,133 @@
/*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
#include <compiler.h>
/*
* Kernel stack
*
* +-----------------------+ -------- STACK_SIZE
* | register file | |
* +-----------------------+ |
* | struct pt_regs | |
* +-----------------------+ | ------ PT_REGS_OFFSET
* double : 16 bytes spill area : | ^
* exception :- - - - - - - - - - - -: | |
* frame : struct pt_regs : | |
* :- - - - - - - - - - - -: | |
* | | | |
* | memory stack | | |
* | | | |
* ~ ~ ~ ~
* ~ ~ ~ ~
* | | | |
* | | | |
* +-----------------------+ | | --- STACK_BIAS
* | struct task_struct | | | ^
* current --> +-----------------------+ | | |
* | struct thread_info | | | |
* +-----------------------+ --------
*/
#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables) */
#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
#define EXC_TABLE_SIZE 0x400
/* Registers used by strace */
#define REG_A_BASE 0xfc000000
#define REG_AR_BASE 0x04000000
#define REG_PC 0x14000000
#define REG_PS 0x080000e6
#define REG_WB 0x08000048
#define REG_WS 0x08000049
#define REG_LBEG 0x08000000
#define REG_LEND 0x08000001
#define REG_LCOUNT 0x08000002
#define REG_SAR 0x08000003
#define REG_DEPC 0x080000c0
#define REG_EXCCAUSE 0x080000e8
#define REG_EXCVADDR 0x080000ee
#define SYSCALL_NR 0x1
#define AR_REGNO_TO_A_REGNO(ar, wb) (ar - wb*4) & ~(XCHAL_NUM_AREGS - 1)
/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
#define PTRACE_SETFPREGS 15
#define PTRACE_GETFPREGSIZE 18
#ifndef __ASSEMBLY__
/*
* This struct defines the way the registers are stored on the
* kernel stack during a system call or other kernel entry.
*/
struct pt_regs {
unsigned long pc; /* 4 */
unsigned long ps; /* 8 */
unsigned long depc; /* 12 */
unsigned long exccause; /* 16 */
unsigned long excvaddr; /* 20 */
unsigned long debugcause; /* 24 */
unsigned long wmask; /* 28 */
unsigned long lbeg; /* 32 */
unsigned long lend; /* 36 */
unsigned long lcount; /* 40 */
unsigned long sar; /* 44 */
unsigned long windowbase; /* 48 */
unsigned long windowstart; /* 52 */
unsigned long syscall; /* 56 */
unsigned long icountlevel; /* 60 */
int reserved[1]; /* 64 */
/* Make sure the areg field is 16 bytes aligned */
int align[0] __aligned(16);
/* current register frame.
* Note: The ESF for kernel exceptions ends after 16 registers!
*/
unsigned long areg[16]; /* 128 (64) */
};
#ifdef __KERNEL__
# define task_pt_regs(tsk) ((struct pt_regs *) \
(task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020) != 0)
# define instruction_pointer(regs) ((regs)->pc)
void show_regs(struct pt_regs *);
# ifndef CONFIG_SMP
# define profile_pc(regs) instruction_pointer(regs)
# endif
#endif /* __KERNEL__ */
#else /* __ASSEMBLY__ */
#ifdef __KERNEL__
# include <asm/asm-offsets.h>
#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _XTENSA_PTRACE_H */
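
The PT_* byte offsets hard-coded at the top of arch/xtensa/cpu/start.S (PT_PC = 0, PT_PS = 4, ..., PT_AREG = 64) must stay in sync with this structure. A compile-time check along these lines could catch drift; this is only a sketch (assumes C11 _Static_assert and 32-bit longs), not part of this patch:

#include <linux/stddef.h>
#include <asm/ptrace.h>

_Static_assert(offsetof(struct pt_regs, pc) == 0, "PT_PC mismatch");
_Static_assert(offsetof(struct pt_regs, ps) == 4, "PT_PS mismatch");
_Static_assert(offsetof(struct pt_regs, exccause) == 12, "PT_EXCCAUSE mismatch");
_Static_assert(offsetof(struct pt_regs, sar) == 40, "PT_SAR mismatch");
_Static_assert(offsetof(struct pt_regs, windowstart) == 48, "PT_WINDOWSTART mismatch");
_Static_assert(offsetof(struct pt_regs, areg) == 64, "PT_AREG mismatch");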

arch/xtensa/include/asm/regs.h
@@ -0,0 +1,95 @@
/*
* Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_REGS_H
#define _XTENSA_REGS_H
/* Special registers */
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
/* Special names for read-only and write-only interrupt registers */
#define INTREAD 226
#define INTSET 226
#define INTCLEAR 227
/* EXCCAUSE register fields */
#define EXCCAUSE_EXCCAUSE_SHIFT 0
#define EXCCAUSE_EXCCAUSE_MASK 0x3F
#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
#define EXCCAUSE_SYSTEM_CALL 1
#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
#define EXCCAUSE_LOAD_STORE_ERROR 3
#define EXCCAUSE_LEVEL1_INTERRUPT 4
#define EXCCAUSE_ALLOCA 5
#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
#define EXCCAUSE_SPECULATION 7
#define EXCCAUSE_PRIVILEGED 8
#define EXCCAUSE_UNALIGNED 9
#define EXCCAUSE_INSTR_DATA_ERROR 12
#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
#define EXCCAUSE_INSTR_ADDR_ERROR 14
#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
#define EXCCAUSE_ITLB_MISS 16
#define EXCCAUSE_ITLB_MULTIHIT 17
#define EXCCAUSE_ITLB_PRIVILEGE 18
#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
#define EXCCAUSE_DTLB_MISS 24
#define EXCCAUSE_DTLB_MULTIHIT 25
#define EXCCAUSE_DTLB_PRIVILEGE 26
#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
#define EXCCAUSE_COPROCESSOR0_DISABLED 32
#define EXCCAUSE_COPROCESSOR1_DISABLED 33
#define EXCCAUSE_COPROCESSOR2_DISABLED 34
#define EXCCAUSE_COPROCESSOR3_DISABLED 35
#define EXCCAUSE_COPROCESSOR4_DISABLED 36
#define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39
#define EXCCAUSE_LAST 63
/* PS register fields */
#define PS_WOE_BIT 18
#define PS_CALLINC_SHIFT 16
#define PS_CALLINC_MASK 0x00030000
#define PS_OWB_SHIFT 8
#define PS_OWB_MASK 0x00000F00
#define PS_RING_SHIFT 6
#define PS_RING_MASK 0x000000C0
#define PS_UM_BIT 5
#define PS_EXCM_BIT 4
#define PS_INTLEVEL_SHIFT 0
#define PS_INTLEVEL_MASK 0x0000000F
/* DBREAKCn register fields */
#define DBREAKC_MASK_BIT 0
#define DBREAKC_MASK_MASK 0x0000003F
#define DBREAKC_LOAD_BIT 30
#define DBREAKC_LOAD_MASK 0x40000000
#define DBREAKC_STOR_BIT 31
#define DBREAKC_STOR_MASK 0x80000000
/* DEBUGCAUSE register fields */
#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
#endif /* _XTENSA_REGS_H */

arch/xtensa/include/asm/relocate.h
@@ -0,0 +1,14 @@
/*
* Copyright (C) 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _ASM_XTENSA_RELOCATE_H
#define _ASM_XTENSA_RELOCATE_H
#include <common.h>
int clear_bss(void);
#endif /* _ASM_XTENSA_RELOCATE_H */

arch/xtensa/include/asm/sections.h
@@ -0,0 +1,12 @@
/*
* Copyright (c) 2012 The Chromium OS Authors.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef __ASM_XTENSA_SECTIONS_H
#define __ASM_XTENSA_SECTIONS_H
#include <asm-generic/sections.h>
#endif

arch/xtensa/include/asm/string.h
@@ -0,0 +1,10 @@
#ifndef _XTENSA_STRING_H
#define _XTENSA_STRING_H
/*
* Use the generic string functions in U-Boot's lib_generic.
* In the boot loader we care about compactness more than performance.
* Prototypes will be taken from <linux/string.h>
*/
#endif /* _XTENSA_STRING_H */

arch/xtensa/include/asm/system.h
@@ -0,0 +1,27 @@
/*
* Copyright (C) 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H
#include <asm/arch/core.h>
#if XCHAL_HAVE_INTERRUPTS
#define local_irq_save(flags) \
__asm__ __volatile__ ("rsil %0, %1" \
: "=a"(flags) \
: "I"(XCHAL_EXCM_LEVEL) \
: "memory")
#define local_irq_restore(flags) \
__asm__ __volatile__ ("wsr %0, ps\n\t" \
"rsync" \
:: "a"(flags) : "memory")
#else
#define local_irq_save(flags) ((void)(flags))
#define local_irq_restore(flags) ((void)(flags))
#endif
#endif

arch/xtensa/include/asm/types.h
@@ -0,0 +1,60 @@
/*
* Copyright (C) 1997 Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef _XTENSA_TYPES_H
#define _XTENSA_TYPES_H
typedef unsigned short umode_t;
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
#endif
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#ifdef __KERNEL__
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
#define BITS_PER_LONG 32
/* DMA addresses are 32 bits wide */
typedef u32 dma_addr_t;
typedef unsigned long phys_addr_t;
typedef unsigned long phys_size_t;
#endif /* __KERNEL__ */
#endif /* _XTENSA_TYPES_H */

@ -0,0 +1,41 @@
/*
* (C) Copyright 2007, Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*
********************************************************************
* NOTE: This header file defines an interface to U-Boot. Including
* this (unmodified) header file in another file is considered normal
* use of U-Boot, and does *not* fall under the heading of "derived
* work".
********************************************************************
*/
#ifndef _XTENSA_U_BOOT_H
#define _XTENSA_U_BOOT_H
#ifdef CONFIG_SYS_GENERIC_BOARD
/* Use the generic board which requires a unified bd_info */
#include <asm-generic/u-boot.h>
#else
#ifndef __ASSEMBLY__
typedef struct bd_info {
int bi_baudrate; /* serial console baudrate */
unsigned long bi_ip_addr; /* IP Address */
unsigned char bi_enetaddr[6]; /* Ethernet address */
unsigned long bi_boot_params; /* where this board expects params */
unsigned long bi_memstart; /* start of DRAM memory VA */
unsigned long bi_memsize; /* size of DRAM memory in bytes */
unsigned long bi_flashstart; /* start of FLASH memory */
unsigned long bi_flashsize; /* size of FLASH memory */
unsigned long bi_flashoffset; /* offset to skip U-Boot image */
} bd_t;
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_SYS_GENERIC_BOARD */
/* For image.h:image_check_target_arch() */
#define IH_ARCH_DEFAULT IH_ARCH_XTENSA
#endif /* _XTENSA_U_BOOT_H */

@ -0,0 +1,6 @@
#ifndef _ASM_XTENSA_UNALIGNED_H
#define _ASM_XTENSA_UNALIGNED_H
#include <asm-generic/unaligned.h>
#endif /* _ASM_XTENSA_UNALIGNED_H */

@ -0,0 +1,10 @@
#
# (C) Copyright 2007 - 2013 Tensilica Inc.
# (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
#
# SPDX-License-Identifier: GPL-2.0+
#
obj-$(CONFIG_CMD_BOOTM) += bootm.o
obj-y += cache.o misc.o relocate.o time.o

@ -0,0 +1,197 @@
/*
* (C) Copyright 2008 - 2013 Tensilica Inc.
* (C) Copyright 2014 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <command.h>
#include <u-boot/zlib.h>
#include <asm/byteorder.h>
#include <asm/addrspace.h>
#include <asm/bootparam.h>
#include <asm/cache.h>
#include <image.h>
DECLARE_GLOBAL_DATA_PTR;
/*
* Setup boot-parameters.
*/
static struct bp_tag *setup_first_tag(struct bp_tag *params)
{
params->id = BP_TAG_FIRST;
params->size = sizeof(long);
*(unsigned long *)&params->data = BP_VERSION;
return bp_tag_next(params);
}
static struct bp_tag *setup_last_tag(struct bp_tag *params)
{
params->id = BP_TAG_LAST;
params->size = 0;
return bp_tag_next(params);
}
static struct bp_tag *setup_memory_tag(struct bp_tag *params)
{
struct bd_info *bd = gd->bd;
struct meminfo *mem;
params->id = BP_TAG_MEMORY;
params->size = sizeof(struct meminfo);
mem = (struct meminfo *)params->data;
mem->type = MEMORY_TYPE_CONVENTIONAL;
mem->start = bd->bi_memstart;
mem->end = bd->bi_memstart + bd->bi_memsize;
printf(" MEMORY: tag:0x%04x, type:0X%lx, start:0X%lx, end:0X%lx\n",
BP_TAG_MEMORY, mem->type, mem->start, mem->end);
return bp_tag_next(params);
}
static struct bp_tag *setup_commandline_tag(struct bp_tag *params,
char *cmdline)
{
int len;
if (!cmdline)
return params;
len = strlen(cmdline);
params->id = BP_TAG_COMMAND_LINE;
params->size = (len + 3) & -4;
strcpy((char *)params->data, cmdline);
printf(" COMMAND_LINE: tag:0x%04x, size:%u, data:'%s'\n",
BP_TAG_COMMAND_LINE, params->size, cmdline);
return bp_tag_next(params);
}
static struct bp_tag *setup_ramdisk_tag(struct bp_tag *params,
unsigned long rd_start,
unsigned long rd_end)
{
struct meminfo *mem;
if (rd_start == rd_end)
return params;
/* Add the initrd as a single bank of memory */
params->id = BP_TAG_INITRD;
params->size = sizeof(struct meminfo);
mem = (struct meminfo *)params->data;
mem->type = MEMORY_TYPE_CONVENTIONAL;
mem->start = PHYSADDR(rd_start);
mem->end = PHYSADDR(rd_end);
printf(" INITRD: tag:0x%x, type:0X%04lx, start:0X%lx, end:0X%lx\n",
BP_TAG_INITRD, mem->type, mem->start, mem->end);
return bp_tag_next(params);
}
static struct bp_tag *setup_serial_tag(struct bp_tag *params)
{
params->id = BP_TAG_SERIAL_BAUDRATE;
params->size = sizeof(unsigned long);
params->data[0] = gd->baudrate;
printf(" SERIAL_BAUDRATE: tag:0x%04x, size:%u, baudrate:%lu\n",
BP_TAG_SERIAL_BAUDRATE, params->size, params->data[0]);
return bp_tag_next(params);
}
#ifdef CONFIG_OF_LIBFDT
static struct bp_tag *setup_fdt_tag(struct bp_tag *params, void *fdt_start)
{
params->id = BP_TAG_FDT;
params->size = sizeof(unsigned long);
params->data[0] = (unsigned long)fdt_start;
printf(" FDT: tag:0x%04x, size:%u, start:0x%lx\n",
BP_TAG_FDT, params->size, params->data[0]);
return bp_tag_next(params);
}
#endif
/*
* Boot Linux.
*/
int do_bootm_linux(int flag, int argc, char *argv[], bootm_headers_t *images)
{
struct bp_tag *params, *params_start;
ulong initrd_start, initrd_end;
char *commandline = getenv("bootargs");
if (!(flag & (BOOTM_STATE_OS_GO | BOOTM_STATE_OS_FAKE_GO)))
return 0;
show_boot_progress(15);
if (images->rd_start) {
initrd_start = images->rd_start;
initrd_end = images->rd_end;
} else {
initrd_start = 0;
initrd_end = 0;
}
params_start = (struct bp_tag *)gd->bd->bi_boot_params;
params = params_start;
params = setup_first_tag(params);
params = setup_memory_tag(params);
params = setup_commandline_tag(params, commandline);
params = setup_serial_tag(params);
if (initrd_start)
params = setup_ramdisk_tag(params, initrd_start, initrd_end);
#ifdef CONFIG_OF_LIBFDT
if (images->ft_addr)
params = setup_fdt_tag(params, images->ft_addr);
#endif
printf("\n");
params = setup_last_tag(params);
show_boot_progress(15);
printf("Transferring Control to Linux @0x%08lx ...\n\n",
(ulong)images->ep);
flush_dcache_range((unsigned long)params_start, (unsigned long)params);
if (flag & BOOTM_STATE_OS_FAKE_GO)
return 0;
/*
* _start() in vmlinux expects boot params in register a2.
* NOTE:
* Disable/delete your U-Boot breakpoints before stepping into Linux.
*/
asm volatile ("mov a2, %0\n\t"
"jx %1\n\t"
: : "a" (params_start), "a" (images->ep)
: "a2");
/* Does not return */
return 1;
}
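
As a reading aid only, here is a hedged sketch of how the tag chain built above could be walked, assuming the bp_tag layout and the bp_tag_next() helper from <asm/bootparam.h> (an id/size header followed by size bytes of data); the dump_bp_tags() helper is hypothetical and not part of this port:

#include <common.h>
#include <asm/bootparam.h>

/* Hypothetical debug helper: print every tag between BP_TAG_FIRST and
 * BP_TAG_LAST in the chain that do_bootm_linux() hands to the kernel. */
static void dump_bp_tags(struct bp_tag *tag)
{
	for (; tag->id != BP_TAG_LAST; tag = bp_tag_next(tag))
		printf("tag 0x%04x, %u bytes of data\n", tag->id, tag->size);
}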

@ -0,0 +1,60 @@
/*
* (C) Copyright 2008 - 2013 Tensilica Inc.
* (C) Copyright 2014 - 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <asm/cache.h>
/*
* We currently always run with caches enabled when running from memory.
* Xtensa version D or later will support changing the cache behavior, so
* we could implement that if necessary.
*/
int dcache_status(void)
{
return 1;
}
void dcache_enable(void)
{
}
void dcache_disable(void)
{
}
void flush_cache(ulong start_addr, ulong size)
{
__flush_invalidate_dcache_range(start_addr, size);
__invalidate_icache_range(start_addr, size);
}
void flush_dcache_all(void)
{
__flush_dcache_all();
__invalidate_icache_all();
}
void flush_dcache_range(ulong start_addr, ulong end_addr)
{
__flush_invalidate_dcache_range(start_addr, end_addr - start_addr);
}
void invalidate_dcache_range(ulong start, ulong stop)
{
__invalidate_dcache_range(start, stop - start);
}
void invalidate_dcache_all(void)
{
__invalidate_dcache_all();
}
void invalidate_icache_all(void)
{
__invalidate_icache_all();
}
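
A hypothetical driver-side use of these wrappers (the function and buffer names are illustrative, not part of this port): flush a descriptor so a bus-mastering device sees it in memory, then invalidate the receive buffer before the CPU reads what the device wrote.

#include <common.h>

/* Hypothetical example only; desc/rx_buf stand in for real driver buffers */
static void dma_transfer_example(void *desc, ulong desc_len,
				 void *rx_buf, ulong rx_len)
{
	/* push the descriptor out of the D-cache for the DMA master */
	flush_dcache_range((ulong)desc, (ulong)desc + desc_len);

	/* ... start the transfer and wait for completion here ... */

	/* drop stale cache lines before the CPU reads the received data */
	invalidate_dcache_range((ulong)rx_buf, (ulong)rx_buf + rx_len);
}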

@ -0,0 +1,179 @@
/*
* Miscellaneous assembly functions.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
* Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
/*
* void __invalidate_icache_page(ulong start)
*/
ENTRY(__invalidate_icache_page)
abi_entry
___invalidate_icache_page a2 a3
isync
abi_ret
ENDPROC(__invalidate_icache_page)
/*
* void __invalidate_dcache_page(ulong start)
*/
ENTRY(__invalidate_dcache_page)
abi_entry
___invalidate_dcache_page a2 a3
dsync
abi_ret
ENDPROC(__invalidate_dcache_page)
/*
* void __flush_invalidate_dcache_page(ulong start)
*/
ENTRY(__flush_invalidate_dcache_page)
abi_entry
___flush_invalidate_dcache_page a2 a3
dsync
abi_ret
ENDPROC(__flush_invalidate_dcache_page)
/*
* void __flush_dcache_page(ulong start)
*/
ENTRY(__flush_dcache_page)
abi_entry
___flush_dcache_page a2 a3
dsync
abi_ret
ENDPROC(__flush_dcache_page)
/*
* void __invalidate_icache_range(ulong start, ulong size)
*/
ENTRY(__invalidate_icache_range)
abi_entry
___invalidate_icache_range a2 a3 a4
isync
abi_ret
ENDPROC(__invalidate_icache_range)
/*
* void __flush_invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_dcache_range)
abi_entry
___flush_invalidate_dcache_range a2 a3 a4
dsync
abi_ret
ENDPROC(__flush_invalidate_dcache_range)
/*
* void __flush_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_dcache_range)
abi_entry
___flush_dcache_range a2 a3 a4
dsync
abi_ret
ENDPROC(__flush_dcache_range)
/*
* void __invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(__invalidate_dcache_range)
abi_entry
___invalidate_dcache_range a2 a3 a4
abi_ret
ENDPROC(__invalidate_dcache_range)
/*
* void __invalidate_icache_all(void)
*/
ENTRY(__invalidate_icache_all)
abi_entry
___invalidate_icache_all a2 a3
isync
abi_ret
ENDPROC(__invalidate_icache_all)
/*
* void __flush_invalidate_dcache_all(void)
*/
ENTRY(__flush_invalidate_dcache_all)
abi_entry
___flush_invalidate_dcache_all a2 a3
dsync
abi_ret
ENDPROC(__flush_invalidate_dcache_all)
/*
* void __invalidate_dcache_all(void)
*/
ENTRY(__invalidate_dcache_all)
abi_entry
___invalidate_dcache_all a2 a3
dsync
abi_ret
ENDPROC(__invalidate_dcache_all)

@ -0,0 +1,18 @@
/*
* Copyright (C) 2016 Cadence Design Systems Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <asm/relocate.h>
#include <asm/sections.h>
#include <asm/string.h>
int clear_bss(void)
{
size_t len = (size_t)&__bss_end - (size_t)&__bss_start;
memset((void *)&__bss_start, 0x00, len);
return 0;
}

@ -0,0 +1,121 @@
/*
* (C) Copyright 2008 - 2013 Tensilica Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <asm/global_data.h>
#include <linux/stringify.h>
DECLARE_GLOBAL_DATA_PTR;
#if XCHAL_HAVE_CCOUNT
static ulong get_ccount(void)
{
ulong ccount;
asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (ccount));
return ccount;
}
#else
static ulong fake_ccount;
#define get_ccount() fake_ccount
#endif
static void delay_cycles(unsigned cycles)
{
#if XCHAL_HAVE_CCOUNT
unsigned expiry = get_ccount() + cycles;
while ((signed)(expiry - get_ccount()) > 0)
;
#else
#warning "Without Xtensa timer option, timing will not be accurate."
/*
* Approximate the cycle count by a loop iteration count.
* This is highly dependent on config and optimization.
*/
volatile unsigned i;
for (i = cycles >> 4U; i > 0; --i)
;
fake_ccount += cycles;
#endif
}
/*
* Delay (busy-wait) for a number of microseconds.
*/
void __udelay(unsigned long usec)
{
ulong lo, hi, i;
ulong mhz = CONFIG_SYS_CLK_FREQ / 1000000;
/*
 * Split usec so that the cycle count passed to delay_cycles() never
 * overflows 32 bits, supporting the full 32-bit usec range.
 */
lo = usec & ((1 << 22) - 1);
hi = usec >> 22;
for (i = 0; i < hi; ++i)
delay_cycles(mhz << 22);
delay_cycles(mhz * lo);
}
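
To illustrate why __udelay() splits its argument, here is a hypothetical host-side self-check (not part of the port, and it assumes a 100 MHz CONFIG_SYS_CLK_FREQ): the hi/lo decomposition is lossless, and the cycle count handed to delay_cycles() in each call stays comfortably inside 32 bits even for the largest usec values.

#include <assert.h>

/* Hypothetical check of the split used in __udelay(), assuming mhz = 100 */
static void udelay_split_check(unsigned long usec)
{
	const unsigned long mhz = 100;
	unsigned long lo = usec & ((1UL << 22) - 1);
	unsigned long hi = usec >> 22;

	assert(hi * (1UL << 22) + lo == usec);	/* the split is lossless */
	assert(mhz * lo < (mhz << 22));		/* ~4.2e8 cycles max per call */
}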
/*
* Return the elapsed time (ticks) since 'base'.
*/
ulong get_timer(ulong base)
{
/* Don't tie up a timer; use cycle counter if available (or fake it) */
#if XCHAL_HAVE_CCOUNT
register ulong ccount;
__asm__ volatile ("rsr %0, CCOUNT" : "=a"(ccount));
return ccount / (CONFIG_SYS_CLK_FREQ / CONFIG_SYS_HZ) - base;
#else
/*
* Add at least the overhead of this call (in cycles).
* Avoids hanging in case caller doesn't use udelay().
* Note that functions that don't call udelay() (such as
* the "sleep" command) will not get a significant delay
* because there is no time reference.
*/
fake_ccount += 20;
return fake_ccount / (CONFIG_SYS_CLK_FREQ / CONFIG_SYS_HZ) - base;
#endif
}
/*
* This function is derived from ARM/PowerPC code (read timebase as long long).
* On Xtensa it just returns the timer value.
*/
unsigned long long get_ticks(void)
{
return get_timer(0);
}
/*
* This function is derived from ARM/PowerPC code (timebase clock frequency).
* On Xtensa it returns the number of timer ticks per second.
*/
ulong get_tbclk(void)
{
ulong tbclk;
tbclk = CONFIG_SYS_HZ;
return tbclk;
}
#if XCHAL_HAVE_CCOUNT
unsigned long timer_get_us(void)
{
unsigned long ccount;
__asm__ volatile ("rsr %0, CCOUNT" : "=a"(ccount));
return ccount / (CONFIG_SYS_CLK_FREQ / 1000000);
}
#endif
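
Finally, a hedged example of the timeout idiom these functions exist to support (the status register, mask, and helper name below are illustrative only): get_timer(0) samples a start value and get_timer(start) then returns the elapsed ticks, CONFIG_SYS_HZ of them per second.

#include <common.h>

/* Hypothetical polling loop built on get_timer(); not part of this port */
static int wait_for_ready(volatile u32 *status_reg, u32 mask, ulong timeout_ms)
{
	ulong start = get_timer(0);

	while (!(*status_reg & mask)) {
		if (get_timer(start) > timeout_ms * CONFIG_SYS_HZ / 1000)
			return -1;	/* timed out */
	}

	return 0;
}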