Refine the routines in mmu-arm.c and move them to mmu-arm.S since the code is now 100% assembly.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@19980 a1c6a512-1295-4272-9138-f99709370657
Author: Michael Sevakis, 2009-02-11 23:56:00 +00:00
Commit 63e709c7c8, parent 00a9685985
8 changed files with 515 additions and 346 deletions
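
The routines touched by this commit form Rockbox's ARM cache-maintenance API (declared in mmu-arm.h further down). A minimal usage sketch, assuming a hypothetical DMA transfer; buf, dma_start_read() and dma_start_write() are placeholders, only the cache calls come from this API:

    #include "mmu-arm.h"

    /* hypothetical DMA hooks, not part of this commit */
    extern void dma_start_read(void *buf, unsigned int size);
    extern void dma_start_write(const void *buf, unsigned int size);

    static unsigned char buf[512];

    void sector_read(void)
    {
        /* Drop cached copies so the CPU sees what DMA writes to RAM;
         * dump_dcache_range() invalidates without a full writeback. */
        dump_dcache_range(buf, sizeof(buf));
        dma_start_read(buf, sizeof(buf));
    }

    void sector_write(void)
    {
        /* Push dirty lines out to RAM before the DMA engine reads them */
        clean_dcache_range(buf, sizeof(buf));
        dma_start_write(buf, sizeof(buf));
    }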


@@ -57,13 +57,6 @@ void imx31_regclr32(volatile uint32_t *reg_p, uint32_t mask);
#define KDEV_INIT
#define HAVE_CPUCACHE_INVALIDATE
#define HAVE_CPUCACHE_FLUSH
/* Different internal names */
#define cpucache_flush clean_dcache
#define cpucache_invalidate invalidate_idcache
struct ARM_REGS {
int r0;
int r1;


@@ -0,0 +1,487 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2006,2007 by Greg White
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#include "cpu.h"
#if CONFIG_CPU == IMX31L
/* TTB routines not used */
/** Cache coherency **/
/*
* Invalidate DCache for this range
* will do write back
* void invalidate_dcache_range(const void *base, unsigned int size);
*/
.section .text, "ax", %progbits
.align 2
.global invalidate_dcache_range
.type invalidate_dcache_range, %function
@ MVA format: 31:5 = Modified virtual address, 4:0 = Ignored
invalidate_dcache_range:
add r1, r0, r1 @ size -> end
cmp r1, r0 @ end <= start?
subhi r1, r1, #1 @ round it down
movhi r2, #0 @
mcrrhi p15, 0, r1, r0, c14 @ Clean and invalidate DCache range
mcrhi p15, 0, r2, c7, c10, 4 @ Data synchronization barrier
bx lr @
.size invalidate_dcache_range, .-invalidate_dcache_range
/*
* clean DCache for this range
* forces DCache writeback for the specified range
* void clean_dcache_range(const void *base, unsigned int size);
*/
.section .text, "ax", %progbits
.align 2
.global clean_dcache_range
.type clean_dcache_range, %function
@ MVA format: 31:5 = Modified virtual address, 4:0 = Ignored
clean_dcache_range:
add r1, r0, r1 @ size -> end
cmp r1, r0 @ end <= start?
subhi r1, r1, #1 @ round it down
movhi r2, #0 @
mcrrhi p15, 0, r1, r0, c12 @ Clean DCache range
mcrhi p15, 0, r2, c7, c10, 4 @ Data synchronization barrier
bx lr @
.size clean_dcache_range, .-clean_dcache_range
/*
* Dump DCache for this range
* will *NOT* do write back except for buffer edges not on a line boundary
* void dump_dcache_range(const void *base, unsigned int size);
*/
.section .text, "ax", %progbits
.align 2
.global dump_dcache_range
.type dump_dcache_range, %function
@ MVA format (mcr): 31:5 = Modified virtual address, 4:0 = SBZ
@ MVA format (mcrr): 31:5 = Modified virtual address, 4:0 = Ignored
dump_dcache_range:
add r1, r0, r1 @ size -> end
cmp r1, r0 @ end <= start?
bxls lr @
tst r0, #31 @ Check first line for bits set
bicne r0, r0, #31 @ Clear low five bits (down)
mcrne p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
@ if not cache aligned
addne r0, r0, #32 @ Move to the next cache line
@
tst r1, #31 @ Check last line for bits set
bicne r1, r1, #31 @ Clear low five bits (down)
mcrne p15, 0, r1, c7, c14, 1 @ Clean and invalidate line by MVA
@ if not cache aligned
sub r1, r1, #32 @ Move to the previous cache line
cmp r1, r0 @ end < start now?
mcrrhs p15, 0, r1, r0, c6 @ Invalidate DCache range
mov r0, #0 @
mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
bx lr @
.size dump_dcache_range, .-dump_dcache_range
/*
* Cleans entire DCache
* void clean_dcache(void);
*/
.section .text, "ax", %progbits
.align 2
.global clean_dcache
.type clean_dcache, %function
.global cpucache_flush @ Alias
clean_dcache:
cpucache_flush:
mov r0, #0 @
mcr p15, 0, r0, c7, c10, 0 @ Clean entire DCache
mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
bx lr @
.size clean_dcache, .-clean_dcache
/*
* Invalidate entire DCache
* will do writeback
* void invalidate_dcache(void);
*/
.section .text, "ax", %progbits
.align 2
.global invalidate_dcache
.type invalidate_dcache, %function
invalidate_dcache:
mov r0, #0 @
mcr p15, 0, r0, c7, c14, 0 @ Clean and invalidate entire DCache
mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
bx lr @
.size invalidate_dcache, .-invalidate_dcache
/*
* Invalidate entire ICache and DCache
* will do writeback
* void invalidate_idcache(void);
*/
.section .text, "ax", %progbits
.align 2
.global invalidate_idcache
.type invalidate_idcache, %function
.global cpucache_invalidate @ Alias
invalidate_idcache:
cpucache_invalidate:
mov r0, #0 @
mcr p15, 0, r0, c7, c14, 0 @ Clean and invalidate entire DCache
mcr p15, 0, r0, c7, c5, 0 @ Invalidate entire ICache
@ Also flushes the branch target cache
mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer (IMB)
bx lr @
.size invalidate_idcache, .-invalidate_idcache
#else /* !IMX31L */
/** MMU setup **/
/*
* void ttb_init(void);
*/
.section .text, "ax", %progbits
.align 2
.global ttb_init
.type ttb_init, %function
ttb_init:
ldr r0, =TTB_BASE_ADDR @
mvn r1, #0 @
mcr p15, 0, r0, c2, c0, 0 @ Set the TTB base address
mcr p15, 0, r1, c3, c0, 0 @ Set all domains to manager status
bx lr @
.size ttb_init, .-ttb_init
/*
* void map_section(unsigned int pa, unsigned int va, int mb, int flags);
*/
.section .text, "ax", %progbits
.align 2
.global map_section
.type map_section, %function
map_section:
@ align to 1MB
@ pa &= (-1 << 20);
mov r0, r0, lsr #20
mov r0, r0, lsl #20
@ pa |= (flags | 0x412);
@ bit breakdown:
@ 10: superuser - r/w, user - no access
@ 4: should be "1"
@ 3,2: Cache flags (flags (r3))
@ 1: Section signature
orr r0, r0, r3
orr r0, r0, #0x410
orr r0, r0, #0x2
@ unsigned int* ttbPtr = TTB_BASE + (va >> 20);
@ sections are 1MB size
mov r1, r1, lsr #20
ldr r3, =TTB_BASE_ADDR
add r1, r3, r1, lsl #0x2
@ Add MB to pa, flags are already present in pa, but the addition
@ should not affect them
@
@ for( ; mb>0; mb--, pa += (1 << 20))
@ {
@ *(ttbPtr++) = pa;
@ }
cmp r2, #0
bxle lr
mov r3, #0x0
1: @ loop
str r0, [r1], #4
add r0, r0, #0x100000
add r3, r3, #0x1
cmp r2, r3
bne 1b @ loop
bx lr
.size map_section, .-map_section
/*
* void enable_mmu(void);
*/
.section .text, "ax", %progbits
.align 2
.global enable_mmu
.type enable_mmu, %function
enable_mmu:
mov r0, #0 @
mcr p15, 0, r0, c8, c7, 0 @ invalidate TLB
mcr p15, 0, r0, c7, c7, 0 @ invalidate both icache and dcache
mrc p15, 0, r0, c1, c0, 0 @
orr r0, r0, #1 @ enable mmu bit
orr r0, r0, #1<<2 @ enable dcache
orr r0, r0, #1<<12 @ enable icache
mcr p15, 0, r0, c1, c0, 0 @
nop @
nop @
nop @
nop @
bx lr @
.size enable_mmu, .-enable_mmu
.ltorg
/** Cache coherency **/
/*
* Invalidate DCache for this range
* will do write back
* void invalidate_dcache_range(const void *base, unsigned int size);
*/
.section .text, "ax", %progbits
.align 2
.global invalidate_dcache_range
.type invalidate_dcache_range, %function
@ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
invalidate_dcache_range:
add r1, r0, r1 @ size -> end
cmp r1, r0 @ end <= start?
bxls lr @
bic r0, r0, #31 @ Align start to cache line (down)
1: @ inv_start @
mcr p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
add r0, r0, #32 @
cmp r1, r0 @
mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
bhi 1b @ inv_start @
mov r0, #0 @
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
bx lr @
.size invalidate_dcache_range, .-invalidate_dcache_range
/*
* clean DCache for this range
* forces DCache writeback for the specified range
* void clean_dcache_range(const void *base, unsigned int size);
*/
.section .text, "ax", %progbits
.align 2
.global clean_dcache_range
.type clean_dcache_range, %function
@ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
clean_dcache_range:
add r1, r0, r1 @ size -> end
cmp r1, r0 @ end <= start?
bxls lr @
bic r0, r0, #31 @ Align start to cache line (down)
1: @ clean_start @
mcr p15, 0, r0, c7, c10, 1 @ Clean line by MVA
add r0, r0, #32 @
cmp r1, r0 @
mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
bhi 1b @ clean_start @
mov r0, #0 @
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
bx lr @
.size clean_dcache_range, .-clean_dcache_range
/*
* Dump DCache for this range
* will *NOT* do write back except for buffer edges not on a line boundary
* void dump_dcache_range(const void *base, unsigned int size);
*/
.section .text, "ax", %progbits
.align 2
.global dump_dcache_range
.type dump_dcache_range, %function
@ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
dump_dcache_range:
add r1, r0, r1 @ size -> end
cmp r1, r0 @ end <= start?
bxls lr @
tst r0, #31 @ Check first line for bits set
bicne r0, r0, #31 @ Clear low five bits (down)
mcrne p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
@ if not cache aligned
addne r0, r0, #32 @ Move to the next cache line
@
tst r1, #31 @ Check last line for bits set
bicne r1, r1, #31 @ Clear low five bits (down)
mcrne p15, 0, r1, c7, c14, 1 @ Clean and invalidate line by MVA
@ if not cache aligned
cmp r1, r0 @ end <= start now?
1: @ dump_start @
mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
addhi r0, r0, #32 @
cmphi r1, r0 @
bhi 1b @ dump_start @
mov r0, #0 @
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
bx lr @
.size dump_dcache_range, .-dump_dcache_range
/*
* Cleans entire DCache
* void clean_dcache(void);
*/
.section .text, "ax", %progbits
.align 2
.global clean_dcache
.type clean_dcache, %function
.global cpucache_flush @ Alias
clean_dcache:
cpucache_flush:
@ Index format: 31:26 = index, 7:5 = segment, remainder = SBZ
mov r0, #0x00000000 @
1: @ clean_start @
mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
sub r0, r0, #0x000000e0 @
adds r0, r0, #0x04000000 @ will wrap to zero at loop end
bne 1b @ clean_start @
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
bx lr @
.size clean_dcache, .-clean_dcache
/*
* Invalidate entire DCache
* will do writeback
* void invalidate_dcache(void);
*/
.section .text, "ax", %progbits
.align 2
.global invalidate_dcache
.type invalidate_dcache, %function
invalidate_dcache:
@ Index format: 31:26 = index, 7:5 = segment, remainder = SBZ
mov r0, #0x00000000 @
1: @ inv_start @
mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
add r0, r0, #0x00000020 @
mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
sub r0, r0, #0x000000e0 @
adds r0, r0, #0x04000000 @ will wrap to zero at loop end
bne 1b @ inv_start @
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
bx lr @
.size invalidate_dcache, .-invalidate_dcache
/*
* Invalidate entire ICache and DCache
* will do writeback
* void invalidate_idcache(void);
*/
.section .text, "ax", %progbits
.align 2
.global invalidate_idcache
.type invalidate_idcache, %function
.global cpucache_invalidate @ Alias
invalidate_idcache:
cpucache_invalidate:
mov r1, lr @ save lr to r1, call uses r0 only
bl invalidate_dcache @ Clean and invalidate entire DCache
mcr p15, 0, r0, c7, c5, 0 @ Invalidate ICache (r0=0 from call)
mov pc, r1 @
.size invalidate_idcache, .-invalidate_idcache
#endif /* !IMX31L */
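
For reference, map_section() above packs the physical address, the cache flags and a fixed set of section-descriptor bits (0x412: supervisor read/write access, the always-one bit 4, and the section type bit) into each first-level page-table entry. A C sketch of the same computation, assuming the TTB_BASE_ADDR constant used by the assembly; the helper name map_section_c is illustrative only:

    /* C rendering of the map_section() loop above (illustrative only) */
    static void map_section_c(unsigned int pa, unsigned int va, int mb, int flags)
    {
        unsigned long *ttb = (unsigned long *)TTB_BASE_ADDR + (va >> 20);
        unsigned int entry = (pa & 0xFFF00000) | flags | 0x412;

        /* one 1 MB section per table entry; adding 1 MB never touches the flag bits */
        for (; mb > 0; mb--, entry += 1 << 20)
            *ttb++ = entry;
    }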


@@ -1,322 +0,0 @@
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2006,2007 by Greg White
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "cpu.h"
#include "mmu-arm.h"
#include "panic.h"
void __attribute__((naked)) ttb_init(void) {
asm volatile
(
"mcr p15, 0, %[ttbB], c2, c0, 0 \n" /* Set the TTB base address */
"mcr p15, 0, %[ffff], c3, c0, 0 \n" /* Set all domains to manager status */
"bx lr \n"
:
: [ttbB] "r" (TTB_BASE),
[ffff] "r" (0xFFFFFFFF)
);
}
void __attribute__((naked)) map_section(unsigned int pa, unsigned int va, int mb, int flags) {
asm volatile
(
/* pa &= (-1 << 20); // align to 1MB */
"mov r0, r0, lsr #20 \n"
"mov r0, r0, lsl #20 \n"
/* pa |= (flags | 0x412);
* bit breakdown:
* 10: superuser - r/w, user - no access
* 4: should be "1"
* 3,2: Cache flags (flags (r3))
* 1: Section signature
*/
"orr r0, r0, r3 \n"
"orr r0, r0, #0x410 \n"
"orr r0, r0, #0x2 \n"
:
:
);
register unsigned long *ttb_base asm ("r3") = TTB_BASE; /* force in r3 */
asm volatile
(
/* unsigned int* ttbPtr = TTB_BASE + (va >> 20);
* sections are 1MB size
*/
"mov r1, r1, lsr #20 \n"
"add r1, %[ttbB], r1, lsl #0x2 \n"
/* Add MB to pa, flags are already present in pa, but addition
* should not affect them
*
* #define MB (1 << 20)
* for( ; mb>0; mb--, pa += MB)
* {
* *(ttbPtr++) = pa;
* }
* #undef MB
*/
"cmp r2, #0 \n"
"bxle lr \n"
"mov r3, #0x0 \n"
"loop: \n"
"str r0, [r1], #4 \n"
"add r0, r0, #0x100000 \n"
"add r3, r3, #0x1 \n"
"cmp r2, r3 \n"
"bne loop \n"
"bx lr \n"
:
: [ttbB] "r" (ttb_base) /* This /HAS/ to be in r3 */
);
(void) pa;
(void) va;
(void) mb;
(void) flags;
}
void __attribute__((naked)) enable_mmu(void) {
asm volatile(
"mov r0, #0 \n"
"mcr p15, 0, r0, c8, c7, 0 \n" /* invalidate TLB */
"mcr p15, 0, r0, c7, c7,0 \n" /* invalidate both icache and dcache */
"mrc p15, 0, r0, c1, c0, 0 \n"
"orr r0, r0, #1 \n" /* enable mmu bit, icache and dcache */
"orr r0, r0, #1<<2 \n" /* enable dcache */
"orr r0, r0, #1<<12 \n" /* enable icache */
"mcr p15, 0, r0, c1, c0, 0 \n"
"nop \n"
"nop \n"
"nop \n"
"nop \n"
"bx lr \n"
:
:
: "r0"
);
}
#if CONFIG_CPU == IMX31L
void __attribute__((naked)) invalidate_dcache_range(const void *base, unsigned int size)
{
asm volatile(
"add r1, r1, r0 \n"
"mov r2, #0 \n"
"mcrr p15, 0, r1, r0, c14 \n" /* Clean and invalidate dcache range */
"mcr p15, 0, r2, c7, c10, 4 \n" /* Data synchronization barrier */
"bx lr \n"
);
(void)base; (void)size;
}
#else
/* Invalidate DCache for this range */
/* Will do write back */
void invalidate_dcache_range(const void *base, unsigned int size) {
unsigned int addr = (((int) base) & ~31); /* Align start to cache line*/
unsigned int end = ((addr+size) & ~31)+64; /* Align end to cache line, pad */
asm volatile(
"inv_start: \n"
"mcr p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
"add %0, %0, #32 \n"
"cmp %0, %1 \n"
"mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
"addne %0, %0, #32 \n"
"cmpne %0, %1 \n"
"mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
"addne %0, %0, #32 \n"
"cmpne %0, %1 \n"
"mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
"addne %0, %0, #32 \n"
"cmpne %0, %1 \n"
"mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
"addne %0, %0, #32 \n"
"cmpne %0, %1 \n"
"mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
"addne %0, %0, #32 \n"
"cmpne %0, %1 \n"
"mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
"addne %0, %0, #32 \n"
"cmpne %0, %1 \n"
"mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line */
"addne %0, %0, #32 \n"
"cmpne %0, %1 \n"
"bne inv_start \n"
"mov %0, #0\n"
"mcr p15,0,%0,c7,c10,4\n" /* Drain write buffer */
: : "r" (addr), "r" (end)
);
}
#endif
#if CONFIG_CPU == IMX31L
void __attribute__((naked)) clean_dcache_range(const void *base, unsigned int size)
{
asm volatile(
"add r1, r1, r0 \n"
"mov r2, #0 \n"
"mcrr p15, 0, r1, r0, c12 \n" /* Clean dcache range */
"mcr p15, 0, r2, c7, c10, 4 \n" /* Data synchronization barrier */
"bx lr \n"
);
(void)base; (void)size;
}
#else
/* clean DCache for this range */
/* forces DCache writeback for the specified range */
void clean_dcache_range(const void *base, unsigned int size) {
unsigned int addr = (int) base;
unsigned int end = addr+size+32;
asm volatile(
"bic %0, %0, #31 \n"
"clean_start: \n"
"mcr p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
"add %0, %0, #32 \n"
"cmp %0, %1 \n"
"mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
"addlo %0, %0, #32 \n"
"cmplo %0, %1 \n"
"mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
"addlo %0, %0, #32 \n"
"cmplo %0, %1 \n"
"mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
"addlo %0, %0, #32 \n"
"cmplo %0, %1 \n"
"mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
"addlo %0, %0, #32 \n"
"cmplo %0, %1 \n"
"mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
"addlo %0, %0, #32 \n"
"cmplo %0, %1 \n"
"mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
"addlo %0, %0, #32 \n"
"cmplo %0, %1 \n"
"mcrlo p15, 0, %0, c7, c10, 1 \n" /* Clean this line */
"addlo %0, %0, #32 \n"
"cmplo %0, %1 \n"
"blo clean_start \n"
"mov %0, #0\n"
"mcr p15,0,%0,c7,c10,4 \n" /* Drain write buffer */
: : "r" (addr), "r" (end));
}
#endif
#if CONFIG_CPU == IMX31L
void __attribute__((naked)) dump_dcache_range(const void *base, unsigned int size)
{
asm volatile(
"add r1, r1, r0 \n"
"mcrr p15, 0, r1, r0, c6 \n"
"bx lr \n"
);
(void)base; (void)size;
}
#else
/* Dump DCache for this range */
/* Will *NOT* do write back */
void dump_dcache_range(const void *base, unsigned int size) {
unsigned int addr = (int) base;
unsigned int end = addr+size;
asm volatile(
"tst %0, #31 \n" /* Check to see if low five bits are set */
"bic %0, %0, #31 \n" /* Clear them */
"mcrne p15, 0, %0, c7, c14, 1 \n" /* Clean and invalidate this line, if those bits were set */
"add %0, %0, #32 \n" /* Move to the next cache line */
"tst %1, #31 \n" /* Check last line for bits set */
"bic %1, %1, #31 \n" /* Clear those bits */
"mcrne p15, 0, %1, c7, c14, 1 \n" /* Clean and invalidate this line, if not cache aligned */
"dump_start: \n"
"mcr p15, 0, %0, c7, c6, 1 \n" /* Invalidate this line */
"add %0, %0, #32 \n" /* Next cache line */
"cmp %0, %1 \n"
"bne dump_start \n"
"dump_end: \n"
"mcr p15,0,%0,c7,c10,4 \n" /* Drain write buffer */
: : "r" (addr), "r" (end));
}
#endif
#if CONFIG_CPU == IMX31L
void __attribute__((naked)) clean_dcache(void)
{
asm volatile (
/* Clean entire data cache */
"mov r0, #0 \n"
"mcr p15, 0, r0, c7, c10, 0 \n"
/* Data synchronization barrier */
"mcr p15, 0, r0, c7, c10, 4 \n"
"bx lr \n"
);
}
#else
/* Cleans entire DCache */
void clean_dcache(void)
{
unsigned int index, addr, low;
for(index = 0; index <= 63; index++)
{
for(low = 0;low <= 7; low++)
{
addr = (index << 26) | (low << 5);
asm volatile
(
"mcr p15, 0, %[addr], c7, c10, 2 \n" /* Clean this entry by index */
:
: [addr] "r" (addr)
);
}
}
}
#endif
#if CONFIG_CPU == IMX31L
void invalidate_idcache(void)
{
asm volatile(
/* Clean and invalidate entire data cache */
"mcr p15, 0, %0, c7, c14, 0 \n"
/* Invalidate entire instruction cache
* Also flushes the branch target cache */
"mcr p15, 0, %0, c7, c5, 0 \n"
/* Data synchronization barrier */
"mcr p15, 0, %0, c7, c10, 4 \n"
/* Flush prefetch buffer */
"mcr p15, 0, %0, c7, c5, 4 \n"
: : "r"(0)
);
}
#else
void invalidate_idcache(void)
{
clean_dcache();
asm volatile(
"mov r0, #0 \n"
"mcr p15, 0, r0, c7, c5, 0 \n"
: : : "r0"
);
}
#endif
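
The removed C clean_dcache() above walked the cache with two nested loops (64 indices by 8 segments). The new assembly replacement in mmu-arm.S collapses that into a single counter: it steps the segment field (bits 7:5) by 0x20 eight times, then adds 0x04000000 so the index field (bits 31:26) advances and the counter finally wraps to zero. A C sketch of that single-counter walk; clean_by_index() and clean_dcache_c() are illustrative names standing in for the "mcr p15, 0, Rd, c7, c10, 2" sequence in the new file:

    /* illustrative only: same set/way walk as the new assembly clean_dcache */
    static inline void clean_by_index(unsigned int fmt)
    {
        /* stands in for: mcr p15, 0, fmt, c7, c10, 2 (clean entry by index) */
        asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r"(fmt));
    }

    static void clean_dcache_c(void)
    {
        unsigned int fmt = 0;
        do {
            unsigned int seg;
            for (seg = 0; seg < 8; seg++)
                clean_by_index(fmt + (seg << 5)); /* bits 7:5 select the segment */
            fmt += 0x04000000;                    /* bits 31:26: next index; wraps to 0 after 63 */
        } while (fmt != 0);
    }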


@@ -18,11 +18,14 @@
* KIND, either express or implied.
*
****************************************************************************/
#ifndef MMU_ARM_H
#define MMU_ARM_H
#define CACHE_ALL 0x0C
#define CACHE_NONE 0
#define BUFFERED 0x04
void memory_init(void);
void ttb_init(void);
void enable_mmu(void);
void map_section(unsigned int pa, unsigned int va, int mb, int flags);
@@ -30,8 +33,12 @@ void map_section(unsigned int pa, unsigned int va, int mb, int flags);
/* Cleans entire DCache */
void clean_dcache(void);
/* Invalidate entire DCache */
/* will do writeback */
void invalidate_dcache(void);
/* Invalidate DCache for this range */
/* Will do write back */
/* will do writeback */
void invalidate_dcache_range(const void *base, unsigned int size);
/* clean DCache for this range */
@@ -39,7 +46,14 @@ void invalidate_dcache_range(const void *base, unsigned int size);
void clean_dcache_range(const void *base, unsigned int size);
/* Dump DCache for this range */
/* Will *NOT* do write back */
/* Will *NOT* do write back except for buffer ends not on a line boundary */
void dump_dcache_range(const void *base, unsigned int size);
void memory_init(void);
/* Invalidate entire ICache and DCache */
/* will do writeback */
void invalidate_idcache(void);
#define HAVE_CPUCACHE_INVALIDATE
#define HAVE_CPUCACHE_FLUSH
#endif /* MMU_ARM_H */
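
The header now carries the complete MMU/cache API. A hypothetical bring-up sequence of the kind a target's memory_init() performs; the addresses, the 64 MB size and the DRAM_PHYS_ADDR name are placeholders, only the functions and flag macros come from mmu-arm.h:

    #include "mmu-arm.h"

    #define DRAM_PHYS_ADDR 0x30000000  /* placeholder physical DRAM base */

    void memory_init(void)
    {
        ttb_init();                                    /* set TTB base, open all domains */
        map_section(0, 0, 4096, CACHE_NONE);           /* flat-map the 4 GB space uncached */
        map_section(DRAM_PHYS_ADDR, 0, 64, CACHE_ALL); /* remap 64 MB of DRAM to va 0, cached */
        enable_mmu();                                  /* invalidate TLB + caches, enable MMU and caches */
    }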


@@ -41,9 +41,4 @@ void s3c_regset32(volatile unsigned long *reg, unsigned long bits);
/* Clear register bits */
void s3c_regclr32(volatile unsigned long *reg, unsigned long bits);
#define HAVE_CPUCACHE_FLUSH
#define HAVE_CPUCACHE_INVALIDATE
#define cpucache_flush clean_dcache
#define cpucache_invalidate invalidate_idcache
#endif /* SYSTEM_TARGET_H */