arch/arm/mmu: unify uint32_t and uintptr_t usage

Only the cp15 API keeps uint32_t; all other MMU code now uses uintptr_t (see the sketch below).

Signed-off-by: buxiasen <buxiasen@xiaomi.com>
Authored by buxiasen on 2025-04-30 16:52:42 +08:00; committed by Xiang Xiao
parent 1de87953e6
commit 7b90b78b52
10 changed files with 57 additions and 57 deletions
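
A minimal sketch of the resulting convention, for illustration only (it is not part of the diff, and the helper names are hypothetical): values read from or written to CP15 coprocessor registers are architecturally 32-bit and stay uint32_t, while anything that carries an address or a page-table entry uses uintptr_t, which matches the pointer width of the build.

/* Illustration of the type convention applied by this commit.
 * fake_cp15_read_ttbr0() is a made-up stand-in for a CP15 register read;
 * the table below is a toy L1 page table, not a real one.
 */

#include <stdint.h>

static uint32_t fake_cp15_read_ttbr0(void)
{
  return 0x80000000u;               /* pretend TTBR0 register value */
}

int main(void)
{
  static uintptr_t l1table[4096];   /* page-table entries are pointer-width */

  uint32_t ttbr0  = fake_cp15_read_ttbr0();  /* register value: uint32_t */
  uintptr_t vaddr = 0x00300000;              /* virtual address: uintptr_t */

  /* One L1 entry covers a 1 MiB section, so the index is vaddr >> 20 */

  l1table[vaddr >> 20] = (uintptr_t)ttbr0;
  return (int)(l1table[vaddr >> 20] == 0);
}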

View file

@@ -135,7 +135,7 @@ static void a1x_vectorpermissions(uint32_t mmuflags)
 {
 /* The PTE for the beginning of ISRAM is at the base of the L2 page table */
-uint32_t pte = mmu_l2_getentry(PG_L2_VECT_VADDR, 0);
+uintptr_t pte = mmu_l2_getentry(PG_L2_VECT_VADDR, 0);
 /* Mask out the old MMU flags from the page table entry.
 *

View file

@@ -207,7 +207,7 @@ static void am335x_vectorpermissions(uint32_t mmuflags)
 * table
 */
-uint32_t pte = mmu_l2_getentry(PG_L2_VECT_VADDR, 0);
+uintptr_t pte = mmu_l2_getentry(PG_L2_VECT_VADDR, 0);
 /* Mask out the old MMU flags from the page table entry.
 *

View file

@@ -71,7 +71,7 @@ int up_shmat(uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
 struct tcb_s *tcb = this_task();
 struct arch_addrenv_s *addrenv;
 uintptr_t *l1entry;
-uint32_t *l2table;
+uintptr_t *l2table;
 irqstate_t flags;
 uintptr_t paddr;
 unsigned int nmapped;
@@ -121,11 +121,11 @@ int up_shmat(uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
 * address.
 */
-l2table = (uint32_t *)arm_pgvaddr(paddr);
+l2table = (uintptr_t *)arm_pgvaddr(paddr);
 /* Initialize the page table */
-memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));
+memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uintptr_t));
 /* In case first time set shm l1 entry */
@@ -144,7 +144,7 @@ int up_shmat(uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
 * address.
 */
-l2table = (uint32_t *)arm_pgvaddr(paddr);
+l2table = (uintptr_t *)arm_pgvaddr(paddr);
 }
 /* Map the virtual address to this physical address */
@@ -165,7 +165,7 @@ int up_shmat(uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
 up_flush_dcache((uintptr_t)l2table,
 (uintptr_t)l2table +
-ENTRIES_PER_L2TABLE * sizeof(uint32_t));
+ENTRIES_PER_L2TABLE * sizeof(uintptr_t));
 leave_critical_section(flags);
 }
@@ -195,7 +195,7 @@ int up_shmdt(uintptr_t vaddr, unsigned int npages)
 struct tcb_s *tcb = this_task();
 struct arch_addrenv_s *addrenv;
 uintptr_t *l1entry;
-uint32_t *l2table;
+uintptr_t *l2table;
 irqstate_t flags;
 uintptr_t paddr;
 unsigned int nunmapped;
@@ -235,7 +235,7 @@ int up_shmdt(uintptr_t vaddr, unsigned int npages)
 * address.
 */
-l2table = (uint32_t *)arm_pgvaddr(paddr);
+l2table = (uintptr_t *)arm_pgvaddr(paddr);
 /* Unmap this virtual page address.
 *

View file

@@ -63,7 +63,7 @@ int arm_addrenv_create_region(uintptr_t **list, unsigned int listlen,
 {
 irqstate_t flags;
 uintptr_t paddr;
-uint32_t *l2table;
+uintptr_t *l2table;
 size_t nmapped;
 unsigned int npages;
 unsigned int nlist;
@@ -113,11 +113,11 @@ int arm_addrenv_create_region(uintptr_t **list, unsigned int listlen,
 /* Get the virtual address corresponding to the physical page address */
-l2table = (uint32_t *)arm_pgvaddr(paddr);
+l2table = (uintptr_t *)arm_pgvaddr(paddr);
 /* Initialize the page table */
-memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));
+memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uintptr_t));
 /* Back up L2 entries with physical memory */
@@ -146,7 +146,7 @@ int arm_addrenv_create_region(uintptr_t **list, unsigned int listlen,
 up_flush_dcache((uintptr_t)l2table,
 (uintptr_t)l2table +
-ENTRIES_PER_L2TABLE * sizeof(uint32_t));
+ENTRIES_PER_L2TABLE * sizeof(uintptr_t));
 leave_critical_section(flags);
 }

View file

@@ -52,10 +52,10 @@
 ****************************************************************************/
 #ifndef CONFIG_ARCH_ROMPGTABLE
-void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags)
+void mmu_l1_setentry(uintptr_t paddr, uintptr_t vaddr, uint32_t mmuflags)
 {
-uint32_t *l1table = mmu_l1_getpgtable();
-uint32_t index = vaddr >> 20;
+uintptr_t *l1table = mmu_l1_getpgtable();
+uint32_t index = vaddr >> 20;
 /* Save the page table entry */
@@ -87,10 +87,10 @@ void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags)
 ****************************************************************************/
 #if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
-void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry)
+void mmu_l1_restore(uintptr_t vaddr, uintptr_t l1entry)
 {
-uint32_t *l1table = mmu_l1_getpgtable();
-uint32_t index = vaddr >> 20;
+uintptr_t *l1table = mmu_l1_getpgtable();
+uint32_t index = vaddr >> 20;
 /* Set the encoded page table entry */
@@ -126,11 +126,11 @@ void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry)
 ****************************************************************************/
 #ifndef CONFIG_ARCH_ROMPGTABLE
-void mmu_l2_setentry(uint32_t l2vaddr, uint32_t paddr, uint32_t vaddr,
+void mmu_l2_setentry(uintptr_t l2vaddr, uintptr_t paddr, uintptr_t vaddr,
 uint32_t mmuflags)
 {
-uint32_t *l2table = (uint32_t *)l2vaddr;
-uint32_t index;
+uintptr_t *l2table = (uintptr_t *)l2vaddr;
+uint32_t index;
 /* The table divides a 1Mb address space up into 256 entries, each
 * corresponding to 4Kb of address space. The page table index is
@@ -227,8 +227,8 @@ void mmu_l1_map_regions(const struct section_mapping_s *mappings,
 #ifndef CONFIG_ARCH_ROMPGTABLE
 void mmu_l1_map_page(const struct section_mapping_s *mapping)
 {
-uint32_t virtaddr = mapping->virtbase;
-uint32_t l2table = mapping->physbase;
+uintptr_t virtaddr = mapping->virtbase;
+uintptr_t l2table = mapping->physbase;
 uint32_t i;
 for (i = 0; i < mapping->nsections; i++)
@@ -352,7 +352,7 @@ void mmu_l2_map_pages(const struct page_mapping_s *mappings,
 ****************************************************************************/
 #ifndef CONFIG_ARCH_ROMPGTABLE
-void mmu_invalidate_region(uint32_t vstart, size_t size)
+void mmu_invalidate_region(uintptr_t vstart, size_t size)
 {
 uint32_t vaddr = vstart & 0xfffff000;
 uint32_t vend = vstart + size;

View file

@@ -57,9 +57,9 @@
 uintptr_t up_addrenv_va_to_pa(void *va)
 {
 uintptr_t vaddr = (uintptr_t)va;
-uint32_t *l2table;
+uintptr_t *l2table;
 uintptr_t paddr;
-uint32_t l1entry;
+uintptr_t l1entry;
 int index;
@@ -79,11 +79,11 @@ uintptr_t up_addrenv_va_to_pa(void *va)
 * level 1 page table entry.
 */
-paddr = ((uintptr_t)l1entry & PMD_PTE_PADDR_MASK);
+paddr = (l1entry & PMD_PTE_PADDR_MASK);
 /* Get the virtual address of the base of level 2 page table */
-l2table = (uint32_t *)arm_pgvaddr(paddr);
+l2table = (uintptr_t *)arm_pgvaddr(paddr);
 if (l2table)
 {
@@ -101,7 +101,7 @@ uintptr_t up_addrenv_va_to_pa(void *va)
 * containing the mapping of the virtual address.
 */
-paddr = ((uintptr_t)l2table[index] & PTE_SMALL_PADDR_MASK);
+paddr = l2table[index] & PTE_SMALL_PADDR_MASK;
 /* Add the correct offset and return the physical address
 * corresponding to the virtual address.

View file

@@ -914,24 +914,24 @@
 struct section_mapping_s
 {
-uint32_t physbase; /* Physical address of the region to be mapped */
-uint32_t virtbase; /* Virtual address of the region to be mapped */
-uint32_t mmuflags; /* MMU settings for the region (e.g., cache-able) */
-uint32_t nsections; /* Number of mappings in the region */
+uintptr_t physbase; /* Physical address of the region to be mapped */
+uintptr_t virtbase; /* Virtual address of the region to be mapped */
+uint32_t mmuflags; /* MMU settings for the region (e.g., cache-able) */
+uint32_t nsections; /* Number of mappings in the region */
 };
 struct page_entry_s
 {
-uint32_t physbase; /* Physical address of the region to be mapped */
-uint32_t virtbase; /* Virtual address of the region to be mapped */
-uint32_t mmuflags; /* MMU settings for the region (e.g., cache-able) */
-uint32_t npages; /* Number of mappings in the region */
+uintptr_t physbase; /* Physical address of the region to be mapped */
+uintptr_t virtbase; /* Virtual address of the region to be mapped */
+uint32_t mmuflags; /* MMU settings for the region (e.g., cache-able) */
+uint32_t npages; /* Number of mappings in the region */
 };
 struct page_mapping_s
 {
-uint32_t l2table; /* Virtual address of l2 table */
-uint32_t entrynum; /* Page entry number */
+uintptr_t l2table; /* Virtual address of l2 table */
+uint32_t entrynum; /* Page entry number */
 const struct page_entry_s *entry; /* Page entry */
 };
 #endif
@@ -1351,7 +1351,7 @@ static inline void cp15_wrttb(uint32_t ttb)
 ****************************************************************************/
 #ifndef CONFIG_ARCH_ROMPGTABLE
-static inline uint32_t *mmu_l1_getpgtable(void)
+static inline uintptr_t *mmu_l1_getpgtable(void)
 {
 #if defined(CONFIG_SMP) && defined(CONFIG_ARCH_ADDRENV)
 uint32_t ttbr0;
@@ -1359,9 +1359,9 @@ static inline uint32_t *mmu_l1_getpgtable(void)
 ttbr0 = CP15_GET(TTBR0);
 pgtable = ttbr0 & TTBR0_BASE_MASK(0);
-return (uint32_t *)(pgtable - PGTABLE_BASE_PADDR + PGTABLE_BASE_VADDR);
+return (uintptr_t *)(pgtable - PGTABLE_BASE_PADDR + PGTABLE_BASE_VADDR);
 #else
-return (uint32_t *)PGTABLE_BASE_VADDR;
+return (uintptr_t *)PGTABLE_BASE_VADDR;
 #endif
 }
 #endif
@@ -1404,7 +1404,7 @@ static inline void mmu_l1_setpgtable(uintptr_t *ttb)
 #ifndef CONFIG_ARCH_ROMPGTABLE
 static inline
-uint32_t mmu_l1table_getentry(uint32_t *l1table, uint32_t vaddr)
+uintptr_t mmu_l1table_getentry(uintptr_t *l1table, uintptr_t vaddr)
 {
 uint32_t index = vaddr >> 20;
@@ -1413,7 +1413,7 @@ uint32_t mmu_l1table_getentry(uint32_t *l1table, uint32_t vaddr)
 return l1table[index];
 }
-static inline uint32_t mmu_l1_getentry(uint32_t vaddr)
+static inline uintptr_t mmu_l1_getentry(uintptr_t vaddr)
 {
 return mmu_l1table_getentry(mmu_l1_getpgtable(), vaddr);
 }
@@ -1433,10 +1433,10 @@ static inline uint32_t mmu_l1_getentry(uint32_t vaddr)
 ****************************************************************************/
 #ifndef CONFIG_ARCH_ROMPGTABLE
-static inline uint32_t mmu_l2_getentry(uint32_t l2vaddr, uint32_t vaddr)
+static inline uintptr_t mmu_l2_getentry(uintptr_t l2vaddr, uintptr_t vaddr)
 {
-uint32_t *l2table = (uint32_t *)l2vaddr;
-uint32_t index;
+uintptr_t *l2table = (uintptr_t *)l2vaddr;
+uint32_t index;
 /* The table divides a 1Mb address space up into 256 entries, each
 * corresponding to 4Kb of address space. The page table index is
@@ -1487,7 +1487,7 @@ extern "C"
 ****************************************************************************/
 #ifndef CONFIG_ARCH_ROMPGTABLE
-void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags);
+void mmu_l1_setentry(uintptr_t paddr, uintptr_t vaddr, uint32_t mmuflags);
 #endif
 /****************************************************************************
@@ -1504,7 +1504,7 @@ void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags);
 ****************************************************************************/
 #if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
-void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry);
+void mmu_l1_restore(uintptr_t vaddr, uintptr_t l1entry);
 #endif
 /****************************************************************************
@@ -1541,7 +1541,7 @@ void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry);
 ****************************************************************************/
 #ifndef CONFIG_ARCH_ROMPGTABLE
-void mmu_l2_setentry(uint32_t l2vaddr, uint32_t paddr, uint32_t vaddr,
+void mmu_l2_setentry(uintptr_t l2vaddr, uintptr_t paddr, uintptr_t vaddr,
 uint32_t mmuflags);
 #endif
@@ -1660,7 +1660,7 @@ void mmu_l2_map_pages(const struct page_mapping_s *mappings,
 ****************************************************************************/
 #ifndef CONFIG_ARCH_ROMPGTABLE
-void mmu_invalidate_region(uint32_t vstart, size_t size);
+void mmu_invalidate_region(uintptr_t vstart, size_t size);
 #endif
 #undef EXTERN
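
A usage sketch for the updated prototypes above, for illustration only: physical and virtual addresses are passed as uintptr_t while the MMU flag word stays uint32_t. The addresses and the zero flag value below are placeholders, not taken from any real board configuration.

#ifndef CONFIG_ARCH_ROMPGTABLE
static void map_example_section(void)
{
  uintptr_t paddr    = 0x40000000;  /* placeholder physical base of a 1 MiB section */
  uintptr_t vaddr    = 0x40000000;  /* placeholder, identity-mapped virtual base */
  uint32_t  mmuflags = 0;           /* placeholder; a real caller would pass MMU_MEMFLAGS or similar */

  mmu_l1_setentry(paddr, vaddr, mmuflags);    /* addresses now typed uintptr_t */
  mmu_invalidate_region(vaddr, 1024 * 1024);  /* drop stale TLB entries for the section */
}
#endif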

View file

@@ -109,7 +109,7 @@ static inline bool arm_uservaddr(uintptr_t vaddr)
 *
 ****************************************************************************/
-static inline void set_l2_entry(uint32_t *l2table, uintptr_t paddr,
+static inline void set_l2_entry(uintptr_t *l2table, uintptr_t paddr,
 uintptr_t vaddr, uint32_t mmuflags)
 {
 uint32_t index;
@@ -134,7 +134,7 @@ static inline void set_l2_entry(uint32_t *l2table, uintptr_t paddr,
 *
 ****************************************************************************/
-static inline void clr_l2_entry(uint32_t *l2table, uintptr_t vaddr)
+static inline void clr_l2_entry(uintptr_t *l2table, uintptr_t vaddr)
 {
 uint32_t index;
@@ -159,7 +159,7 @@ static inline void clr_l2_entry(uint32_t *l2table, uintptr_t vaddr)
 *
 ****************************************************************************/
-static inline uintptr_t get_l2_entry(uint32_t *l2table, uintptr_t vaddr)
+static inline uintptr_t get_l2_entry(uintptr_t *l2table, uintptr_t vaddr)
 {
 uint32_t index;

View file

@@ -118,7 +118,7 @@ static void imx_vectorpermissions(uint32_t mmuflags)
 {
 /* The PTE for the beginning of OCRAM is at the base of the L2 page table */
-uint32_t pte = mmu_l2_getentry(PG_L2_VECT_VADDR, 0);
+uintptr_t pte = mmu_l2_getentry(PG_L2_VECT_VADDR, 0);
 /* Mask out the old MMU flags from the page table entry.
 *

View file

@@ -109,7 +109,7 @@ static void sam_vectorpermissions(uint32_t mmuflags)
 {
 /* The PTE for the beginning of ISRAM is at the base of the L2 page table */
-uint32_t pte = mmu_l2_getentry(PG_L2_VECT_VADDR, 0);
+uintptr_t pte = mmu_l2_getentry(PG_L2_VECT_VADDR, 0);
 /* Mask out the old MMU flags from the page table entry.
 *