@@ -10,8 +10,145 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>
/*
 * [ NOTE 1 ]:
 * Data cache (L1 D$ or SL$) entire invalidate operation or data cache disable
 * operation may result in unexpected behavior and data loss even if we flush
 * the data cache right before invalidation. That may happen if we store any
 * context on the stack (as we store the BLINK register on the stack before a
 * function call). BLINK is the register where the return address is
 * automatically saved when we do a function call with instructions like 'bl'.
 *
 * Here is a real example:
 * we may hang in the code below as we store the BLINK register on the stack
 * in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *	__dc_entire_op(OP_FLUSH);
 *	// Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *	__dc_entire_op(OP_INV);
 *	// Other code //
 * }
 *
 * void foo(void) {
 *	flush_dcache_all();
 *	invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *	[return address is saved to BLINK register]
 *	[push BLINK] (save to stack)		![point 1]
 *	|->> call __dc_entire_op(OP_FLUSH)
 *	    [return address is saved to BLINK register]
 *	    [flush L1 D$]
 *	    return [jump to BLINK]
 *	<<------
 *	[other flush_dcache_all code]
 *	[pop BLINK] (get from stack)
 *	return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *	[return address is saved to BLINK register]
 *	[push BLINK] (save to stack)		![point 2]
 *	|->> call __dc_entire_op(OP_INV)
 *	    [return address is saved to BLINK register]
 *	    [invalidate L1 D$]			![point 3]
 *	    // Oops!!!
 *	    // We lose the return address of the invalidate_dcache_all
 *	    // function: we saved it on the stack and invalidated the
 *	    // L1 D$ after that!
 *	    return [jump to BLINK]
 *	<<------
 *	[other invalidate_dcache_all code]
 *	[pop BLINK] (get from stack)
 *	// we don't have this data in the L1 dcache as we invalidated it in
 *	// [point 3], so we fetch it from the next memory level (for example
 *	// DDR memory); but in memory we have the value saved at [point 1],
 *	// which is the return address of flush_dcache_all (instead of the
 *	// address in the current invalidate_dcache_all function which we
 *	// saved at [point 2]!)
 *	return [jump to BLINK]
 *   <<------
 *   // As BLINK points into invalidate_dcache_all, we call it again and
 *   // loop forever.
*
 * Fortunately we may fix that by doing the flush & invalidation of D$ with
 * one single instruction (instead of a flush/invalidate instruction pair) and
 * by forcing function inlining with the '__attribute__((always_inline))' gcc
 * attribute, to avoid any function call (and BLINK store) between the cache
 * flush and the disable.
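 *
 * A minimal sketch of the resulting safe pattern (using names introduced
 * later in this patch):
 *
 *	static inlined_cachefunc void __dc_entire_op(const int cacheop);
 *
 *	void flush_n_invalidate_dcache_all(void)
 *	{
 *		// force-inlined single flush-n-inv operation: no function
 *		// call and no BLINK spill between flush and invalidate
 *		__dc_entire_op(OP_FLUSH_N_INV);
 *	}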
*
*
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW (for example, since version 3.0 HS
 * supports SL$ (L2 system level cache) disable) but we don't support it in SW.
 * Configuration 1:
 *        ______________________
 *       |                      |
 *       |       ARC CPU        |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |     main memory      |
 *       |______________________|
 *
 * Configuration 2:
 *        ______________________
 *       |                      |
 *       |       ARC CPU        |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |       L2 (SL$)       |
 *       |______________________|
 *         always must be on
 *        ___|______________|____
 *       |                      |
 *       |     main memory      |
 *       |______________________|
 *
 * Configuration 3:
 *        ______________________
 *       |                      |
 *       |       ARC CPU        |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off       must be on
 *        ___|______________|____       _______
 *       |                      |      |       |
 *       |       L2 (SL$)       |------|  IOC  |
 *       |______________________|      |_______|
 *         always must be on            on/off
 *        ___|______________|____
 *       |                      |
 *       |     main memory      |
 *       |______________________|
 */
DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)
@@ -19,11 +156,10 @@
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)
#define CACHE_VER_NUM_MASK	0xF

#define OP_INV			0x1
#define OP_FLUSH		0x2
#define OP_INV_IC		0x3
#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)
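/*
 * Side note on the one-hot encoding above: OP_FLUSH_N_INV sets both bits, so
 * a combined operation satisfies both the (op & OP_INV) and (op & OP_FLUSH)
 * checks used below:
 *	OP_FLUSH_N_INV & OP_INV   -> nonzero (invalidate path taken)
 *	OP_FLUSH_N_INV & OP_FLUSH -> nonzero (flush/wait path taken)
 * The old 0x1/0x2/0x3 values had the same property; BIT() just makes it
 * explicit.
 */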
/* Bit val in SLC_CONTROL */
#define SLC_CTRL_DIS		0x001
@@ -31,55 +167,117 @@
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))
/*
 * By default these variables will fall into the .bss section.
 * But the .bss section is not relocated, so they would be initialized before
 * relocation but used after being zeroed.
 */
/*
 * We don't want to use the '__always_inline' macro here as it can be
 * redefined to simple 'inline' in some cases which breaks stuff. See
 * [ NOTE 1 ] for more details about the reasons we need to use always_inline
 * functions.
 */
int l1_line_sz __section(".data");
bool dcache_exists __section(".data") = false;
bool icache_exists __section(".data") = false;

#define CACHE_LINE_MASK		(~(l1_line_sz - 1))
#ifdef CONFIG_ISA_ARCV2
int slc_line_sz __section(".data");
bool slc_exists __section(".data") = false;
bool ioc_exists __section(".data") = false;
bool pae_exists __section(".data") = false;

#define inlined_cachefunc	inline __attribute__((always_inline))

/* To force enable IOC set ioc_enable to 'true' */
bool ioc_enable __section(".data") = false;

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);
void read_decode_mmu_bcr(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	u32 tmp;

	tmp = read_aux_reg(ARC_AUX_MMU_BCR);

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*           DTLB      ITLB      JES        JE         JA      */
	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif /* CONFIG_CPU_BIG_ENDIAN */
	} *mmu4;

	mmu4 = (struct bcr_mmu_4 *)&tmp;

	pae_exists = !!mmu4->pae;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */
}

static inline bool pae_exists(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	if (mmu4.fields.pae)
		return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

	return false;
}
static inlined_cachefunc bool icache_exists(void)
{
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;
}
static void __slc_entire_op(const int op)
static inlined_cachefunc bool icache_enabled(void)
{
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;
}

static inlined_cachefunc bool slc_data_bypass(void)
{
	/*
	 * If L1 data cache is disabled SL$ is bypassed and all load/store
	 * requests are sent directly to main memory.
	 */
	return !dcache_enabled();
}
static inline bool ioc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;
}

static inline bool ioc_enabled(void)
{
	/*
	 * We only check the CONFIG option here instead of the IOC HW state,
	 * as IOC must be disabled by default.
	 */
	if (is_ioc_enabled())
		return ioc_exists();

	return false;
}
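/*
 * The helpers above share one pattern: the *_exists() functions decode a
 * build configuration register (BCR) to tell whether the block is present in
 * HW at all, while the *_enabled() functions additionally check the run-time
 * CTRL register. A caller can therefore simply write, for example:
 *
 *	if (dcache_enabled())	// implies dcache_exists()
 *		__dc_entire_op(OP_FLUSH);
 */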
static inlined_cachefunc void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	if (!slc_exists())
		return;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
@@ -104,6 +302,14 @@ static void __slc_entire_op(const int op)
static void slc_upper_region_init(void)
{
	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */
	if (!pae_exists())
		return;

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
@@ -113,9 +319,14 @@ static void slc_upper_region_init(void)
static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	unsigned int ctrl;
	unsigned long end;

	if (!slc_exists())
		return;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
@@ -142,7 +353,7 @@ static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (l2_line_sz - 1) to sz.
	 */
	end = paddr + sz + slc_line_sz - 1;
	end = paddr + sz + gd->arch.slc_line_sz - 1;
	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
@@ -156,85 +367,82 @@ static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
	read_aux_reg(ARC_AUX_SLC_CTRL);
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
#endif /* CONFIG_ISA_ARCV2 */
}
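/*
 * Worked example for the END/START setup above (hypothetical numbers): with
 * a 64-byte SLC line, paddr = 0x80000000 and sz = 0x40 give
 * end = 0x80000000 + 0x40 + 0x3f = 0x8000007f, so END is always programmed
 * strictly above START, even for a region smaller than one line.
 */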
static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Try to enable IOC but SLC is not present");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Try to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be power of 2 and bigger than 4KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512M, 0x12 implies 1G...
	 */
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);

	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}
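/*
 * Worked example of the AP0_SIZE encoding above, assuming 512 MiB of DDR:
 * ap_size / 1024 = 524288 KiB, order_base_2(524288) = 19, so the register
 * gets 19 - 2 = 17 = 0x11, and indeed 2 ^ (0x11 + 2) KiB = 512 MiB.
 */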
#ifdef CONFIG_ISA_ARCV2
static void read_decode_cache_bcr_arcv2(void)
{
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, way:2, lsz:2, sz:4;
#else
			unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
		} fields;
		unsigned int word;
	} slc_cfg;

	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, ver:8;
#else
			unsigned int ver:8, pad:24;
#endif
		} fields;
		unsigned int word;
	} sbcr;

	sbcr.word = read_aux_reg(ARC_BCR_SLC);
	if (sbcr.fields.ver) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		slc_exists = true;
		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
	}

	union {
		struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
			unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
		} fields;
		unsigned int word;
	} cbcr;

	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
	if (cbcr.fields.c && ioc_enable)
		ioc_exists = true;
}
#endif

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2
	union bcr_slc_cfg slc_cfg;

	if (slc_exists()) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

		/*
		 * We don't support a configuration where L1 I$ or L1 D$ is
		 * absent but SL$ exists. See [ NOTE 2 ] for more details.
		 */
		if (!icache_exists() || !dcache_exists())
			panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
	}
#endif /* CONFIG_ISA_ARCV2 */
}
void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
			unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
		} fields;
		unsigned int word;
	} ibcr, dbcr;
	union bcr_di_cache ibcr, dbcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		icache_exists = true;
		l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
		gd->arch.l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		dcache_exists = true;
		l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}
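/*
 * Example of the line-length decode above (a typical ARC HS configuration
 * with 64-byte lines): ibcr.fields.line_len = 3 gives 8 << 3 = 64 for the
 * I$, and dbcr.fields.line_len = 2 gives 16 << 2 = 64 for the D$.
 */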
@@ -247,109 +455,79 @@ void cache_init(void)
{
	read_decode_cache_bcr();

#ifdef CONFIG_ISA_ARCV2
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/* IOC Aperture start is equal to DDR start */
		unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
		/* IOC Aperture size is equal to DDR size */
		long ap_size = CONFIG_SYS_SDRAM_SIZE;

		flush_dcache_all();
		invalidate_dcache_all();

		if (!is_power_of_2(ap_size) || ap_size < 4096)
			panic("IOC Aperture size must be power of 2 and bigger 4Kib");

		/*
		 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
		 * so setting 0x11 implies 512M, 0x12 implies 1G...
		 */
		write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
			      order_base_2(ap_size / 1024) - 2);

		/* IOC Aperture start must be aligned to the size of the aperture */
		if (ap_base % ap_size != 0)
			panic("IOC Aperture start must be aligned to the size of the aperture");

		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
		write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
	}

	read_decode_mmu_bcr();

	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists before using them.
	 */
	if (slc_exists && pae_exists)
		slc_upper_region_init();
#endif /* CONFIG_ISA_ARCV2 */

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();
}
int icache_status(void)
{
	if (!icache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
	return icache_enabled();
}
void icache_enable(void)
{
	if (icache_exists)
	if (icache_exists())
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}
void icache_disable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
			      IC_CTRL_CACHE_DISABLE);
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);
}
void invalidate_icache_all(void)
/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
	if (!icache_enabled())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	if (icache_status()) {
		write_aux_reg(ARC_AUX_IC_IVIC, 1);
		/*
		 * As per ARC HS databook (see chapter 5.3.3.2)
		 * it is required to add 3 NOPs after each write to IC_IVIC.
		 */
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
	}
	write_aux_reg(ARC_AUX_IC_IVIC, 1);
	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
}
#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif

void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * so we need to invalidate it too.
	 * TODO: HS 3.0 supports SLC disable so we need to check the SLC
	 * enable/disable status here.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);
}
int dcache_status(void)
{
	if (!dcache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
	return dcache_enabled();
}
void dcache_enable(void)
{
	if (!dcache_exists)
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
@@ -358,83 +536,77 @@ void dcache_enable(void)
void dcache_disable(void)
{
	if (!dcache_exists)
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC will be bypassed for data after the L1 D$ is disabled,
	 * we need to flush it before disabling the L1 D$. We also invalidate
	 * the SLC to avoid any inconsistent data problems after enabling the
	 * L1 D$ again with the dcache_enable function.
	 */
	if (is_isa_arcv2())
		__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long sz,
				     const int cacheop)
/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
#if (CONFIG_ARC_MMU_VER == 3)
	unsigned int aux_tag;
#endif
	int num_lines;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_AUX_IC_IVIL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_DC_PTAG;
#endif
	}

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l1_line_sz);
	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(aux_tag, paddr);
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += l1_line_sz;
		paddr += gd->arch.l1_line_sz;
	}
}
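/*
 * Worked example of the rounding above (hypothetical numbers): with 64-byte
 * lines, paddr = 0x1005 and sz = 0x10 become sz = 0x15 and paddr = 0x1000,
 * so num_lines = DIV_ROUND_UP(0x15, 64) = 1, i.e. exactly the single line
 * that contains the whole original range.
 */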
static unsigned int __before_dc_op(const int op)
{
	unsigned int reg;

	if (op == OP_INV) {
		/*
		 * IM is set by default and implies Flush-n-inv
		 * Clear it here for vanilla inv
		 */
		reg = read_aux_reg(ARC_AUX_DC_CTRL);
		write_aux_reg(ARC_AUX_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}

	return reg;
}

static inlined_cachefunc void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}
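/*
 * E.g. for OP_FLUSH_N_INV the IM bit is set above, so the single write to
 * ARC_AUX_DC_IVDC in __dc_entire_op() both writes back and discards every
 * line. That is the "single instruction" fix described in [ NOTE 1 ].
 */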
static void __after_dc_op(const int op, unsigned int reg)
static inlined_cachefunc void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);

	/* Switch back to default Invalidate mode */
	if (op == OP_INV)
		write_aux_reg(ARC_AUX_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
}
static inline void __dc_entire_op(const int cacheop)
static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
	int aux;
	unsigned int ctrl_reg = __before_dc_op(cacheop);

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
@@ -443,36 +615,36 @@ static inline void __dc_entire_op(const int cacheop)
	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop, ctrl_reg);
	__after_dc_op(cacheop);
}
static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	unsigned int ctrl_reg = __before_dc_op(cacheop);

	if (!dcache_enabled())
		return;

	__cache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop, ctrl_reg);
	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}
#else
#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, sz, cacheop)
#endif /* !CONFIG_SYS_DCACHE_OFF */
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);
#endif
}
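/*
 * Hypothetical usage sketch (driver calls invented for illustration): code
 * receiving a buffer by DMA would invalidate the range before reading it:
 *
 *	start_dma_rx(buf, len);		// device writes buffer to memory
 *	wait_dma_done();
 *	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	process(buf, len);		// CPU now sees the fresh data
 */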
void flush_dcache_range(unsigned long start, unsigned long end)
@@ -480,15 +652,17 @@ void flush_dcache_range(unsigned long start, unsigned long end)
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);
#endif
}
void flush_cache(unsigned long start, unsigned long size)
@@ -496,22 +670,47 @@ void flush_cache(unsigned long start, unsigned long size)
	flush_dcache_range(start, start + size);
}
void invalidate_dcache_all(void)
/*
 * As invalidate_dcache_all() is not used in generic U-Boot code and as we
 * don't need it in arch/arc code alone (invalidate without flush) we implement
 * flush_n_invalidate_dcache_all (flush and invalidate in one operation)
 * because it's much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_INV);
	__dc_entire_op(OP_FLUSH_N_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);
}
void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);
#endif
}
/*
 * This function cleans up all caches (and therefore syncs the I/D caches). It
 * can be used for cleanup before a Linux launch or to sync caches during
 * relocation.
 */
void sync_n_cleanup_cache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * and we shouldn't flush it. So invalidate it instead of flush_n_inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();
}
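/*
 * Hypothetical usage sketch (not part of this patch): right before jumping
 * to freshly copied code, e.g. a relocated U-Boot or a loaded kernel image:
 *
 *	memcpy((void *)dst, image, image_size);
 *	sync_n_cleanup_cache_all();	// write back D$, drop stale I$ lines
 *	// jump to dst
 */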