armv8: add hooks for all cache-wide operations

SoC-specific logic may be required for all forms of cache-wide
operations: invalidate and flush of both dcache and icache (note that
only 3 of the 4 possible combinations make sense, since the icache never
contains dirty lines). This patch adds an optional hook for each
implemented cache-wide operation, and renames the one existing hook to
better represent exactly which operation it implements. A dummy
no-op implementation of each hook is provided.

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Tom Warren <twarren@nvidia.com>
master
Stephen Warren 8 years ago committed by Tom Warren
parent b9ae6415b6
commit 1ab557a074
  1. 18
      arch/arm/cpu/armv8/cache.S
  2. 8
      arch/arm/cpu/armv8/cache_v8.c
  3. 4
      arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
  4. 4
      arch/arm/include/asm/system.h
  5. 4
      arch/arm/mach-tegra/tegra186/cache.S

@ -150,11 +150,23 @@ ENTRY(__asm_invalidate_icache_all)
ret
ENDPROC(__asm_invalidate_icache_all)
ENTRY(__asm_flush_l3_cache)
ENTRY(__asm_invalidate_l3_dcache)
mov x0, #0 /* return status as success */
ret
ENDPROC(__asm_flush_l3_cache)
.weak __asm_flush_l3_cache
ENDPROC(__asm_invalidate_l3_dcache)
.weak __asm_invalidate_l3_dcache
ENTRY(__asm_flush_l3_dcache)
mov x0, #0 /* return status as success */
ret
ENDPROC(__asm_flush_l3_dcache)
.weak __asm_flush_l3_dcache
ENTRY(__asm_invalidate_l3_icache)
mov x0, #0 /* return status as success */
ret
ENDPROC(__asm_invalidate_l3_icache)
.weak __asm_invalidate_l3_icache
/*
* void __asm_switch_ttbr(ulong new_ttbr)

@ -421,19 +421,20 @@ __weak void mmu_setup(void)
void invalidate_dcache_all(void)
{
__asm_invalidate_dcache_all();
__asm_invalidate_l3_dcache();
}
/*
* Performs a clean & invalidation of the entire data cache at all levels.
* This function needs to be inline to avoid using stack.
* __asm_flush_l3_cache return status of timeout
* __asm_flush_l3_dcache return status of timeout
*/
inline void flush_dcache_all(void)
{
int ret;
__asm_flush_dcache_all();
ret = __asm_flush_l3_cache();
ret = __asm_flush_l3_dcache();
if (ret)
debug("flushing dcache returns 0x%x\n", ret);
else
@ -623,7 +624,7 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
void icache_enable(void)
{
__asm_invalidate_icache_all();
invalidate_icache_all();
set_sctlr(get_sctlr() | CR_I);
}
@ -640,6 +641,7 @@ int icache_status(void)
void invalidate_icache_all(void)
{
__asm_invalidate_icache_all();
__asm_invalidate_l3_icache();
}
#else /* CONFIG_SYS_ICACHE_OFF */

@ -245,7 +245,7 @@ hnf_set_pstate:
ret
ENTRY(__asm_flush_l3_cache)
ENTRY(__asm_flush_l3_dcache)
/*
* Return status in x0
* success 0
@ -275,7 +275,7 @@ ENTRY(__asm_flush_l3_cache)
mov x0, x8
mov lr, x29
ret
ENDPROC(__asm_flush_l3_cache)
ENDPROC(__asm_flush_l3_dcache)
#endif
#ifdef CONFIG_MP

@ -93,7 +93,9 @@ void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
int __asm_flush_l3_cache(void);
int __asm_invalidate_l3_dcache(void);
int __asm_flush_l3_dcache(void);
int __asm_invalidate_l3_icache(void);
void __asm_switch_ttbr(u64 new_ttbr);
void armv8_switch_to_el2(void);

@ -10,7 +10,7 @@
#define SMC_SIP_INVOKE_MCE 0x82FFFF00
#define MCE_SMC_ROC_FLUSH_CACHE (SMC_SIP_INVOKE_MCE | 11)
ENTRY(__asm_flush_l3_cache)
ENTRY(__asm_flush_l3_dcache)
mov x0, #(MCE_SMC_ROC_FLUSH_CACHE & 0xffff)
movk x0, #(MCE_SMC_ROC_FLUSH_CACHE >> 16), lsl #16
mov x1, #0
@ -22,4 +22,4 @@ ENTRY(__asm_flush_l3_cache)
smc #0
mov x0, #0
ret
ENDPROC(__asm_flush_l3_cache)
ENDPROC(__asm_flush_l3_dcache)

Loading…
Cancel
Save