Merge branch 'master' of git://git.denx.de/u-boot-samsung

Branch: master
Committer: Tom Rini
Commit: 6fa361903c
 arch/arm/cpu/armv7/exynos/Makefile         |   2
 arch/arm/cpu/armv7/exynos/common_setup.h   |  62
 arch/arm/cpu/armv7/exynos/exynos5_setup.h  |   3
 arch/arm/cpu/armv7/exynos/lowlevel_init.c  | 147
 arch/arm/cpu/armv7/exynos/sec_boot.S       | 128
 arch/arm/cpu/armv7/exynos/soc.c            |  35
 arch/arm/include/asm/arch-exynos/cpu.h     |   5
 arch/arm/include/asm/arch-exynos/system.h  |  88
 arch/arm/include/asm/armv7.h               |  44
 include/configs/exynos5420-common.h        |  16
 10 files changed, 530 lines

--- a/arch/arm/cpu/armv7/exynos/Makefile
+++ b/arch/arm/cpu/armv7/exynos/Makefile
@@ -7,6 +7,8 @@
obj-y += clock.o power.o soc.o system.o pinmux.o tzpc.o
obj-$(CONFIG_EXYNOS5420) += sec_boot.o
ifdef CONFIG_SPL_BUILD
obj-$(CONFIG_EXYNOS5) += clock_init_exynos5.o
obj-$(CONFIG_EXYNOS5) += dmc_common.o dmc_init_ddr3.o

--- a/arch/arm/cpu/armv7/exynos/common_setup.h
+++ b/arch/arm/cpu/armv7/exynos/common_setup.h
@@ -23,6 +23,8 @@
* MA 02111-1307 USA
*/
#include <asm/arch/system.h>
#define DMC_OFFSET 0x10000
/*
@@ -43,3 +45,63 @@ void system_clock_init(void);
int do_lowlevel_init(void);
void sdelay(unsigned long);
enum l2_cache_params {
CACHE_DATA_RAM_LATENCY_2_CYCLES = (2 << 0),
CACHE_DATA_RAM_LATENCY_3_CYCLES = (3 << 0),
CACHE_DISABLE_CLEAN_EVICT = (1 << 3),
CACHE_DATA_RAM_SETUP = (1 << 5),
CACHE_TAG_RAM_LATENCY_2_CYCLES = (2 << 6),
CACHE_TAG_RAM_LATENCY_3_CYCLES = (3 << 6),
CACHE_ENABLE_HAZARD_DETECT = (1 << 7),
CACHE_TAG_RAM_SETUP = (1 << 9),
CACHE_ECC_AND_PARITY = (1 << 21),
CACHE_ENABLE_FORCE_L2_LOGIC = (1 << 27)
};
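/*
 * Note: bits 0-9 and 21 above are L2CTLR fields; CACHE_DISABLE_CLEAN_EVICT,
 * CACHE_ENABLE_HAZARD_DETECT and CACHE_ENABLE_FORCE_L2_LOGIC are L2ACTLR
 * bits, consumed by configure_l2_ctlr()/configure_l2_actlr() below.
 */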
#ifndef CONFIG_SYS_L2CACHE_OFF
/*
* Configure L2CTLR to get timings that keep us from hanging/crashing.
*
* Must be inline here since low_power_start() is called without a
* stack (!).
*/
static inline void configure_l2_ctlr(void)
{
uint32_t val;
mrc_l2_ctlr(val);
val |= CACHE_TAG_RAM_SETUP |
CACHE_DATA_RAM_SETUP |
CACHE_TAG_RAM_LATENCY_2_CYCLES |
CACHE_DATA_RAM_LATENCY_2_CYCLES;
if (proid_is_exynos5420() || proid_is_exynos5800()) {
val |= CACHE_ECC_AND_PARITY |
CACHE_TAG_RAM_LATENCY_3_CYCLES |
CACHE_DATA_RAM_LATENCY_3_CYCLES;
}
mcr_l2_ctlr(val);
}
/*
* Configure L2ACTLR.
*
* Must be inline here since low_power_start() is called without a
* stack (!).
*/
static inline void configure_l2_actlr(void)
{
uint32_t val;
if (proid_is_exynos5420() || proid_is_exynos5800()) {
mrc_l2_aux_ctlr(val);
val |= CACHE_ENABLE_FORCE_L2_LOGIC |
CACHE_DISABLE_CLEAN_EVICT;
mcr_l2_aux_ctlr(val);
}
}
#endif

--- a/arch/arm/cpu/armv7/exynos/exynos5_setup.h
+++ b/arch/arm/cpu/armv7/exynos/exynos5_setup.h
@@ -700,6 +700,9 @@
#define CLK_DIV_CPERI1_VAL NOT_AVAILABLE
#else
#define CPU_CONFIG_STATUS_OFFSET 0x80
#define CPU_RST_FLAG_VAL 0xFCBA0D10
#define PAD_RETENTION_DRAM_COREBLK_VAL 0x10000000
/* APLL_CON1 */

--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -31,7 +31,10 @@
#include <asm/arch/tzpc.h>
#include <asm/arch/periph.h>
#include <asm/arch/pinmux.h>
#include <asm/arch/system.h>
#include <asm/armv7.h>
#include "common_setup.h"
#include "exynos5_setup.h"
/* These are the things we can do during low-level init */
enum {
@@ -42,6 +45,128 @@ enum {
DO_POWER = 1 << 4,
};
#ifdef CONFIG_EXYNOS5420
/*
* Power up secondary CPUs.
*/
static void secondary_cpu_start(void)
{
v7_enable_smp(EXYNOS5420_INFORM_BASE);
svc32_mode_en();
branch_bx(CONFIG_EXYNOS_RELOCATE_CODE_BASE);
}
/*
 * This is the entry point for CPU hotplug and
 * cluster switching.
 */
static void low_power_start(void)
{
uint32_t val, reg_val;
reg_val = readl(EXYNOS5420_SPARE_BASE);
if (reg_val != CPU_RST_FLAG_VAL) {
writel(0x0, CONFIG_LOWPOWER_FLAG);
branch_bx(0x0);
}
reg_val = readl(CONFIG_PHY_IRAM_BASE + 0x4);
if (reg_val != (uint32_t)&low_power_start) {
/* Store jump address as low_power_start if not present */
writel((uint32_t)&low_power_start, CONFIG_PHY_IRAM_BASE + 0x4);
dsb();
sev();
}
/* Set the CPU to SVC32 mode */
svc32_mode_en();
#ifndef CONFIG_SYS_L2CACHE_OFF
/* Read MIDR for Primary Part Number */
mrc_midr(val);
val = (val >> 4);
val &= 0xf;
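/*
 * Bits [7:4] of MIDR are the low nibble of the primary part number:
 * 0xf identifies a Cortex-A15 (part 0xc0f), so only the A15 cluster
 * gets this L2 setup; Cortex-A7 cores (part 0xc07) skip it.
 */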
if (val == 0xf) {
configure_l2_ctlr();
configure_l2_actlr();
v7_enable_l2_hazard_detect();
}
#endif
/* Invalidate L1 & TLB */
val = 0x0;
mcr_tlb(val);
mcr_icache(val);
/* Disable MMU and D-cache; enable I-cache, branch prediction and alignment checks */
mrc_sctlr(val);
val &= ~((0x2 << 12) | 0x7);
val |= ((0x1 << 12) | (0x8 << 8) | 0x2);
mcr_sctlr(val);
/* CPU state is hotplug or reset */
secondary_cpu_start();
/* Execution should never return here; park the core if it does */
wfi();
}
/*
* Pointer to this function is stored in iRam which is used
* for jump and power down of a specific core.
*/
static void power_down_core(void)
{
uint32_t tmp, core_id, core_config;
/* Get the unique core id */
/*
* Multiprocessor Affinity Register
* [11:8] Cluster ID
* [1:0] CPU ID
*/
mrc_mpafr(core_id);
tmp = core_id & 0x3;
core_id = (core_id >> 6) & ~3;
core_id |= tmp;
core_id &= 0x3f;
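/*
 * Worked example (hypothetical MPIDR value): cluster 1, CPU 2 has
 * MPIDR[11:8] = 1 and MPIDR[1:0] = 2, so tmp = 2,
 * (core_id >> 6) & ~3 = 4, and the final core_id = 6;
 * i.e. core_id = cluster * 4 + cpu.
 */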
/* Set the status of the core to low */
core_config = (core_id * CPU_CONFIG_STATUS_OFFSET);
core_config += EXYNOS5420_CPU_CONFIG_BASE;
writel(0x0, core_config);
/* Core enter WFI */
wfi();
}
/*
 * The secondary cores' configuration is not suitable at this
 * stage, so reconfigure them: shut down and update the status
 * of all cores except the primary core.
 */
static void secondary_cores_configure(void)
{
/* Clear secondary boot iRAM base */
writel(0x0, (CONFIG_EXYNOS_RELOCATE_CODE_BASE + 0x1C));
/* Set low-power flag and address */
writel(CPU_RST_FLAG_VAL, CONFIG_LOWPOWER_FLAG);
writel((uint32_t)&low_power_start, CONFIG_LOWPOWER_ADDR);
writel(CPU_RST_FLAG_VAL, EXYNOS5420_SPARE_BASE);
/* Store jump address for power down */
writel((uint32_t)&power_down_core, CONFIG_PHY_IRAM_BASE + 0x4);
/* Make the writes visible, then wake cores waiting in WFE so all of them can power down */
dsb();
sev();
}
extern void relocate_wait_code(void);
#endif
int do_lowlevel_init(void)
{
uint32_t reset_status;
@@ -49,6 +174,28 @@ int do_lowlevel_init(void)
arch_cpu_init();
#ifndef CONFIG_SYS_L2CACHE_OFF
/*
* Init L2 cache parameters here for use by boot and resume
*
* These are here instead of in v7_outer_cache_enable() so that the
* L2 cache settings get properly set even at resume time or if we're
* running U-Boot with the cache off. The kernel still needs us to
* set these for it.
*/
configure_l2_ctlr();
configure_l2_actlr();
dsb();
isb();
#endif
#ifdef CONFIG_EXYNOS5420
relocate_wait_code();
/* Reconfigure secondary cores */
secondary_cores_configure();
#endif
reset_status = get_reset_status();
switch (reset_status) {

--- /dev/null
+++ b/arch/arm/cpu/armv7/exynos/sec_boot.S
@@ -0,0 +1,128 @@
/*
* Copyright (C) 2013 Samsung Electronics
* Akshay Saraswat <akshay.s@samsung.com>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <config.h>
#include <asm/arch/cpu.h>
.globl relocate_wait_code
relocate_wait_code:
adr r0, code_base @ r0: source address (start)
adr r1, code_end @ r1: source address (end)
ldr r2, =0x02073000 @ r2: target address
1:
ldmia r0!, {r3-r6}
stmia r2!, {r3-r6}
cmp r0, r1
blt 1b
b code_end
.ltorg
/*
 * Secondary cores wait here until the primary core wakes them up.
 * The code below is copied to CONFIG_EXYNOS_RELOCATE_CODE_BASE.
 * It is workaround code meant to act as a substitute/supplement
 * for the iROM code.
 *
 * This workaround code is relocated to the address 0x02073000
 * because that is the last 4 KB of the iRAM
 * (base address 0x02020000, limit address 0x02074000).
 *
 * U-Boot and the kernel know about this code and these flags
 * because the workaround lives in the last 4 KB of the iRAM and
 * the flag and address values are defined identically in both
 * the kernel and U-Boot.
 */
code_base:
b 1f
/*
* These addresses are used as flags by U-Boot and the kernel.
*
* Jump address for resume and flag to check for resume/reset:
* Resume address - 0x2073008
* Resume flag - 0x207300C
*
* Jump address for cluster switching:
* Switch address - 0x2073018
*
* Jump address for core hotplug:
* Hotplug address - 0x207301C
*
* Jump address for C2 state (reserved for future use; not used right now):
* C2 address - 0x2073024
*
* Managed per core status for the active cluster:
* CPU0 state - 0x2073028
* CPU1 state - 0x207302C
* CPU2 state - 0x2073030
* CPU3 state - 0x2073034
*
* Managed per core GIC status for the active cluster:
* CPU0 gic state - 0x2073038
* CPU1 gic state - 0x207303C
* CPU2 gic state - 0x2073040
* CPU3 gic state - 0x2073044
*
* Logic of the code:
* Step-1: Read current CPU status.
* Step-2: If it's a resume then continue, else jump to step 4.
* Step-3: Clear inform1 PMU register and jump to inform0 value.
* Step-4: If it's a switch, C2 or reset, get the hotplug address.
* Step-5: If address is not available, enter WFE.
* Step-6: If address is available, jump to that address.
*/
nop @ for backward compatibility
.word 0x0 @ REG0: RESUME_ADDR
.word 0x0 @ REG1: RESUME_FLAG
.word 0x0 @ REG2
.word 0x0 @ REG3
_switch_addr:
.word 0x0 @ REG4: SWITCH_ADDR
_hotplug_addr:
.word 0x0 @ REG5: CPU1_BOOT_REG
.word 0x0 @ REG6
_c2_addr:
.word 0x0 @ REG7: REG_C2_ADDR
_cpu_state:
.word 0x1 @ CPU0_STATE : RESET
.word 0x2 @ CPU1_STATE : SECONDARY RESET
.word 0x2 @ CPU2_STATE : SECONDARY RESET
.word 0x2 @ CPU3_STATE : SECONDARY RESET
_gic_state:
.word 0x0 @ CPU0 - GICD_IGROUPR0
.word 0x0 @ CPU1 - GICD_IGROUPR0
.word 0x0 @ CPU2 - GICD_IGROUPR0
.word 0x0 @ CPU3 - GICD_IGROUPR0
1:
adr r0, _cpu_state
mrc p15, 0, r7, c0, c0, 5 @ read MPIDR
and r7, r7, #0xf @ r7 = cpu id
/* Read the current cpu state */
ldr r10, [r0, r7, lsl #2]
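/*
 * State bits consumed below (derived from the tests that follow):
 * bit 4 = cluster switch request, bit 0 = reset; a non-zero INFORM1
 * means resume. Anything else polls the hotplug address.
 */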
svc_entry:
tst r10, #(1 << 4)
adrne r0, _switch_addr
bne wait_for_addr
/* Clear INFORM1 */
ldr r0, =(0x10040000 + 0x804)
ldr r1, [r0]
cmp r1, #0x0
movne r1, #0x0
strne r1, [r0]
/* Get INFORM0 */
ldrne r1, =(0x10040000 + 0x800)
ldrne pc, [r1]
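/* Reset case: jump back into U-Boot (0x23e00000 matches
 * CONFIG_SYS_TEXT_BASE of the exynos5420 boards). */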
tst r10, #(1 << 0)
ldrne pc, =0x23e00000
adr r0, _hotplug_addr
wait_for_addr:
ldr r1, [r0]
cmp r1, #0x0
bxne r1
wfe
b wait_for_addr
.ltorg
code_end:
mov pc, lr

--- a/arch/arm/cpu/armv7/exynos/soc.c
+++ b/arch/arm/cpu/armv7/exynos/soc.c
@@ -9,13 +9,6 @@
#include <asm/io.h>
#include <asm/system.h>
enum l2_cache_params {
CACHE_TAG_RAM_SETUP = (1 << 9),
CACHE_DATA_RAM_SETUP = (1 << 5),
CACHE_TAG_RAM_LATENCY = (2 << 6),
CACHE_DATA_RAM_LATENCY = (2 << 0)
};
void reset_cpu(ulong addr)
{
writel(0x1, samsung_get_base_swreset());
@@ -28,31 +21,3 @@ void enable_caches(void)
dcache_enable();
}
#endif
#ifndef CONFIG_SYS_L2CACHE_OFF
/*
* Set L2 cache parameters
*/
static void exynos5_set_l2cache_params(void)
{
unsigned int val = 0;
asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r"(val));
val |= CACHE_TAG_RAM_SETUP |
CACHE_DATA_RAM_SETUP |
CACHE_TAG_RAM_LATENCY |
CACHE_DATA_RAM_LATENCY;
asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val));
}
/*
* Sets L2 cache related parameters before enabling data cache
*/
void v7_outer_cache_enable(void)
{
if (cpu_is_exynos5())
exynos5_set_l2cache_params();
}
#endif

--- a/arch/arm/include/asm/arch-exynos/cpu.h
+++ b/arch/arm/include/asm/arch-exynos/cpu.h
@@ -153,6 +153,10 @@
#define EXYNOS5420_CLOCK_BASE 0x10010000
#define EXYNOS5420_POWER_BASE 0x10040000
#define EXYNOS5420_SWRESET 0x10040400
#define EXYNOS5420_INFORM_BASE 0x10040800
#define EXYNOS5420_SPARE_BASE 0x10040900
#define EXYNOS5420_CPU_CONFIG_BASE 0x10042000
#define EXYNOS5420_CPU_STATUS_BASE 0x10042004
#define EXYNOS5420_SYSREG_BASE 0x10050000
#define EXYNOS5420_TZPC_BASE 0x100E0000
#define EXYNOS5420_WATCHDOG_BASE 0x101D0000
@@ -186,6 +190,7 @@
#define EXYNOS5420_USB3PHY_BASE DEVICE_NOT_AVAILABLE
#define EXYNOS5420_USB_HOST_XHCI_BASE DEVICE_NOT_AVAILABLE
#ifndef __ASSEMBLY__
#include <asm/io.h>
/* CPU detection macros */

--- a/arch/arm/include/asm/arch-exynos/system.h
+++ b/arch/arm/include/asm/arch-exynos/system.h
@@ -37,6 +37,94 @@ struct exynos5_sysreg {
#define USB20_PHY_CFG_HOST_LINK_EN (1 << 0)
/*
* Data Synchronization Barrier acts as a special kind of memory barrier.
* No instruction in program order after this instruction executes until
* this instruction completes. This instruction completes when:
* - All explicit memory accesses before this instruction complete.
* - All Cache, Branch predictor and TLB maintenance operations before
* this instruction complete.
*/
#define dsb() __asm__ __volatile__ ("dsb\n\t" : : )
/*
* This instruction causes an event to be signaled to all cores
* within a multiprocessor system. If SEV is implemented,
* WFE must also be implemented.
*/
#define sev() __asm__ __volatile__ ("sev\n\t" : : )
/*
* If the Event Register is not set, WFE suspends execution until
* one of the following events occurs:
* - an IRQ interrupt, unless masked by the CPSR I-bit
* - an FIQ interrupt, unless masked by the CPSR F-bit
* - an Imprecise Data abort, unless masked by the CPSR A-bit
* - a Debug Entry request, if Debug is enabled
* - an Event signaled by another processor using the SEV instruction.
* If the Event Register is set, WFE clears it and returns immediately.
* If WFE is implemented, SEV must also be implemented.
*/
#define wfe() __asm__ __volatile__ ("wfe\n\t" : : )
/* Write 0xd3 to CPSR: SVC mode (0x13) with IRQ and FIQ masked (0xc0) */
#define svc32_mode_en() __asm__ __volatile__ \
("@ I&F disable, Mode: 0x13 - SVC\n\t" \
"msr cpsr_c, #0x13|0xC0\n\t" : : )
/* Set program counter with the given value */
#define set_pc(x) __asm__ __volatile__ ("mov pc, %0\n\t" : : "r"(x))
/* Branch to the given location */
#define branch_bx(x) __asm__ __volatile__ ("bx %0\n\t" : : "r"(x))
/* Read Main Id register */
#define mrc_midr(x) __asm__ __volatile__ \
("mrc p15, 0, %0, c0, c0, 0\n\t" : "=r"(x) : )
/* Read Multiprocessor Affinity Register */
#define mrc_mpafr(x) __asm__ __volatile__ \
("mrc p15, 0, %0, c0, c0, 5\n\t" : "=r"(x) : )
/* Read System Control Register */
#define mrc_sctlr(x) __asm__ __volatile__ \
("mrc p15, 0, %0, c1, c0, 0\n\t" : "=r"(x) : )
/* Read Auxiliary Control Register */
#define mrc_auxr(x) __asm__ __volatile__ \
("mrc p15, 0, %0, c1, c0, 1\n\t" : "=r"(x) : )
/* Read L2 Control register */
#define mrc_l2_ctlr(x) __asm__ __volatile__ \
("mrc p15, 1, %0, c9, c0, 2\n\t" : "=r"(x) : )
/* Read L2 Auxiliary Control register */
#define mrc_l2_aux_ctlr(x) __asm__ __volatile__ \
("mrc p15, 1, %0, c15, c0, 0\n\t" : "=r"(x) : )
/* Write System Control Register */
#define mcr_sctlr(x) __asm__ __volatile__ \
("mcr p15, 0, %0, c1, c0, 0\n\t" : : "r"(x))
/* Write Auxiliary Control Register */
#define mcr_auxr(x) __asm__ __volatile__ \
("mcr p15, 0, %0, c1, c0, 1\n\t" : : "r"(x))
/* Invalidate all instruction caches to PoU */
#define mcr_icache(x) __asm__ __volatile__ \
("mcr p15, 0, %0, c7, c5, 0\n\t" : : "r"(x))
/* Invalidate unified TLB */
#define mcr_tlb(x) __asm__ __volatile__ \
("mcr p15, 0, %0, c8, c7, 0\n\t" : : "r"(x))
/* Write L2 Control register */
#define mcr_l2_ctlr(x) __asm__ __volatile__ \
("mcr p15, 1, %0, c9, c0, 2\n\t" : : "r"(x))
/* Write L2 Auxiliary Control register */
#define mcr_l2_aux_ctlr(x) __asm__ __volatile__ \
("mcr p15, 1, %0, c15, c0, 0\n\t" : : "r"(x))
void set_usbhost_mode(unsigned int mode);
void set_system_display_ctrl(void);
int exynos_lcd_early_init(const void *blob);

--- a/arch/arm/include/asm/armv7.h
+++ b/arch/arm/include/asm/armv7.h
@@ -69,6 +69,50 @@
#define CP15DSB asm volatile ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0))
#define CP15DMB asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0))
/*
* Workaround for ARM erratum 798870
* Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been
* stalled for 1024 cycles to verify that its hazard condition still exists.
*/
static inline void v7_enable_l2_hazard_detect(void)
{
uint32_t val;
/* L2ACTLR[7]: Enable hazard detect timeout */
asm volatile ("mrc p15, 1, %0, c15, c0, 0\n\t" : "=r"(val));
val |= (1 << 7);
asm volatile ("mcr p15, 1, %0, c15, c0, 0\n\t" : : "r"(val));
}
/*
* Workaround for ARM erratum 799270
* Ensure that the L2 logic has been used within the previous 256 cycles
* before modifying the ACTLR.SMP bit. This is required during boot before
* MMU has been enabled, or during a specified reset or power down sequence.
*/
static inline void v7_enable_smp(uint32_t address)
{
uint32_t temp, val;
/* Read auxiliary control register */
asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" : "=r"(val));
/* Enable SMP */
val |= (1 << 6);
/* Dummy read to ensure L2 access */
temp = readl(address);
temp &= 0;
val |= temp;
/* Write auxiliary control register */
asm volatile ("mcr p15, 0, %0, c1, c0, 1\n\t" : : "r"(val));
CP15DSB;
CP15ISB;
}
void v7_en_l2_hazard_detect(void);
void v7_outer_cache_enable(void);
void v7_outer_cache_disable(void);
void v7_outer_cache_flush_all(void);

--- a/include/configs/exynos5420-common.h
+++ b/include/configs/exynos5420-common.h
@@ -38,4 +38,20 @@
#define CONFIG_BOARD_REV_GPIO_COUNT 2
#define CONFIG_PHY_IRAM_BASE 0x02020000
/* Address for relocating helper code (Last 4 KB of IRAM) */
#define CONFIG_EXYNOS_RELOCATE_CODE_BASE (CONFIG_IRAM_TOP - 0x1000)
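/*
 * With CONFIG_IRAM_TOP at 0x02074000 (per the sec_boot.S comment) this
 * resolves to 0x02073000, the relocation target of relocate_wait_code().
 */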
/*
* Low Power settings
*/
#define CONFIG_LOWPOWER_FLAG 0x02020028
#define CONFIG_LOWPOWER_ADDR 0x0202002C
/*
* Number of CPUs available
*/
#define CONFIG_CORE_COUNT 0x8
#endif /* __CONFIG_EXYNOS5420_H */
