@@ -19,11 +19,12 @@
  * clean and invalidate one level cache.
  *
  * x0: cache level
- * x1~x9: clobbered
+ * x1: 0 flush & invalidate, 1 invalidate only
+ * x2~x9: clobbered
  */
 ENTRY(__asm_flush_dcache_level)
-	lsl	x1, x0, #1
-	msr	csselr_el1, x1		/* select cache level */
+	lsl	x12, x0, #1
+	msr	csselr_el1, x12		/* select cache level */
 	isb				/* sync change of cssidr_el1 */
 	mrs	x6, ccsidr_el1		/* read the new cssidr_el1 */
 	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
@@ -35,7 +36,7 @@ ENTRY(__asm_flush_dcache_level)
 	clz	w5, w4			/* bit position of #ways */
 	mov	x4, #0x7fff
 	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
-	/* x1 <- cache level << 1 */
+	/* x12 <- cache level << 1 */
 	/* x2 <- line length offset */
 	/* x3 <- number of cache ways - 1 */
 	/* x4 <- number of cache sets - 1 */
@@ -45,11 +46,14 @@ loop_set:
 	mov	x6, x3			/* x6 <- working copy of #ways */
 loop_way:
 	lsl	x7, x6, x5
-	orr	x9, x1, x7		/* map way and level to cisw value */
+	orr	x9, x12, x7		/* map way and level to cisw value */
 	lsl	x7, x4, x2
 	orr	x9, x9, x7		/* map set number to cisw value */
-	dc	cisw, x9		/* clean & invalidate by set/way */
-	subs	x6, x6, #1		/* decrement the way */
+	tbz	w1, #0, 1f		/* x1 bit 0 clear: clean & invalidate */
+	dc	isw, x9			/* invalidate by set/way */
+	b	2f
+1:	dc	cisw, x9		/* clean & invalidate by set/way */
+2:	subs	x6, x6, #1		/* decrement the way */
 	b.ge	loop_way
 	subs	x4, x4, #1		/* decrement the set */
 	b.ge	loop_set
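
The tbz/dc isw path added above chooses, inside the innermost loop, between clean-and-invalidate and invalidate-only for each set/way operand. As a rough orientation only, the following C-style sketch mirrors the loop_set/loop_way walk; the function and helper names are illustrative assumptions, not part of this patch, and the real routine stays in assembly so it can run without a stack and with the caches in any state.

#include <stdint.h>

/* Illustrative wrappers for the DC ISW / DC CISW instructions used above. */
static inline void dc_isw(uint64_t setway)
{
	__asm__ volatile("dc isw, %0" : : "r" (setway) : "memory");
}

static inline void dc_cisw(uint64_t setway)
{
	__asm__ volatile("dc cisw, %0" : : "r" (setway) : "memory");
}

/*
 * Sketch of loop_set/loop_way: walk every set and way of one cache level,
 * composing the set/way operand the same way the assembly does:
 *   (level << 1) | (way << way_shift) | (set << line_shift)
 */
static void dcache_level_setway(uint64_t level, uint64_t line_shift,
				int64_t ways, int64_t sets,
				uint64_t way_shift, int invalidate_only)
{
	for (int64_t set = sets; set >= 0; set--) {
		for (int64_t way = ways; way >= 0; way--) {
			uint64_t val = (level << 1) |
				       ((uint64_t)way << way_shift) |
				       ((uint64_t)set << line_shift);
			if (invalidate_only)
				dc_isw(val);	/* invalidate by set/way */
			else
				dc_cisw(val);	/* clean & invalidate */
		}
	}
}
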
@@ -58,11 +62,14 @@ loop_way:
 ENDPROC(__asm_flush_dcache_level)
 
 /*
- * void __asm_flush_dcache_all(void)
+ * void __asm_flush_dcache_all(int invalidate_only)
+ *
+ * x0: 0 flush & invalidate, 1 invalidate only
  *
  * clean and invalidate all data cache by SET/WAY.
  */
-ENTRY(__asm_flush_dcache_all)
+ENTRY(__asm_dcache_all)
+	mov	x1, x0			/* x1 <- invalidate_only flag */
 	dsb	sy
 	mrs	x10, clidr_el1		/* read clidr_el1 */
 	lsr	x11, x10, #24
@@ -76,13 +83,13 @@ ENTRY(__asm_flush_dcache_all)
 	/* x15 <- return address */
 
 loop_level:
-	lsl	x1, x0, #1
-	add	x1, x1, x0		/* x0 <- tripled cache level */
-	lsr	x1, x10, x1
-	and	x1, x1, #7		/* x1 <- cache type */
-	cmp	x1, #2
+	lsl	x12, x0, #1
+	add	x12, x12, x0		/* x12 <- tripled cache level */
+	lsr	x12, x10, x12
+	and	x12, x12, #7		/* x12 <- cache type */
+	cmp	x12, #2
 	b.lt	skip			/* skip if no cache or icache */
-	bl	__asm_flush_dcache_level
+	bl	__asm_flush_dcache_level	/* x1 = 0 flush, 1 invalidate */
 skip:
 	add	x0, x0, #1		/* increment cache level */
 	cmp	x11, x0
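
For context, the loop_level code above walks CLIDR_EL1: it takes the LoC field from bits [26:24] and, for each level below it, reads the 3-bit cache-type field, invoking the per-level routine only for data or unified caches (type >= 2). Below is a hedged C-style sketch of that walk, with an assumed callback standing in for the bl __asm_flush_dcache_level call; none of these names come from the patch.

#include <stdint.h>

static inline uint64_t read_clidr_el1(void)
{
	uint64_t v;

	__asm__ volatile("mrs %0, clidr_el1" : "=r" (v));
	return v;
}

/* Sketch of loop_level: visit every data/unified cache level below LoC. */
static void dcache_all_setway(int invalidate_only,
			      void (*level_op)(uint64_t level, int invalidate_only))
{
	uint64_t clidr = read_clidr_el1();
	uint64_t loc = (clidr >> 24) & 0x7;	/* Level of Coherency */

	for (uint64_t level = 0; level < loc; level++) {
		uint64_t ctype = (clidr >> (3 * level)) & 0x7;

		/* 0 = no cache, 1 = I-cache only: nothing to clean/invalidate */
		if (ctype >= 2)
			level_op(level, invalidate_only);
	}
}
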
@@ -96,8 +103,24 @@ skip:
 finished:
 	ret
-ENDPROC(__asm_flush_dcache_all)
+ENDPROC(__asm_dcache_all)
+
+ENTRY(__asm_flush_dcache_all)
+	mov	x16, lr			/* preserve return address */
+	mov	x0, #0			/* 0: flush & invalidate */
+	bl	__asm_dcache_all
+	mov	lr, x16
+	ret
+ENDPROC(__asm_flush_dcache_all)
+
+ENTRY(__asm_invalidate_dcache_all)
+	mov	x16, lr			/* preserve return address */
+	mov	x0, #0xffff		/* bit 0 set: invalidate only */
+	bl	__asm_dcache_all
+	mov	lr, x16
+	ret
+ENDPROC(__asm_invalidate_dcache_all)
 
 /*
  * void __asm_flush_dcache_range(start, end)
  *