arm64: simplify the vectors

Signed-off-by: ligd <liguiding1@xiaomi.com>
ligd authored 2024-02-04 21:37:50 +08:00, committed by Xiang Xiao
parent e38f2b2a6d
commit 370679c65b
3 changed files with 131 additions and 311 deletions


@@ -312,8 +312,7 @@ void arm64_pginitialize(void);
 # define arm64_pginitialize()
 #endif /* CONFIG_LEGACY_PAGING */
 
-uint64_t * arm64_syscall_switch(uint64_t *regs);
-int arm64_syscall(uint64_t *regs);
+uint64_t *arm64_syscall(uint64_t *regs);
 
 /* Low level serial output **************************************************/

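Taken together, this header change is the core of the refactor: the separate context-switch entry point disappears, and the remaining arm64_syscall() returns the register frame that the vector code must restore. A minimal C model of the new contract, under the assumption that a hypothetical restore_and_eret() stands in for the real exception-return path:

    #include <stdint.h>

    uint64_t *arm64_syscall(uint64_t *regs);   /* prototype from the hunk above */

    void restore_and_eret(uint64_t *regs);     /* hypothetical eret helper */

    /* Model of the new vector flow: hand in the saved frame, restore
     * whatever comes back.  A different pointer means the syscall
     * requested a context switch.
     */

    static void sync_exc_model(uint64_t *saved_frame)
    {
      restore_and_eret(arm64_syscall(saved_frame));
    }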

@@ -53,18 +53,18 @@ typedef uintptr_t (*syscall_t)(unsigned int, ...);
  ****************************************************************************/
 
 static void arm64_dump_syscall(const char *tag, uint64_t cmd,
-                               const struct regs_context * f_regs)
+                               const uint64_t *regs)
 {
-  svcinfo("SYSCALL %s: regs: %p cmd: %" PRId64 "\n", tag, f_regs, cmd);
+  svcinfo("SYSCALL %s: regs: %p cmd: %" PRId64 "\n", tag, regs, cmd);
   svcinfo("x0: 0x%-16lx x1: 0x%lx\n",
-          f_regs->regs[REG_X0], f_regs->regs[REG_X1]);
+          regs[REG_X0], regs[REG_X1]);
   svcinfo("x2: 0x%-16lx x3: 0x%lx\n",
-          f_regs->regs[REG_X2], f_regs->regs[REG_X3]);
+          regs[REG_X2], regs[REG_X3]);
   svcinfo("x4: 0x%-16lx x5: 0x%lx\n",
-          f_regs->regs[REG_X4], f_regs->regs[REG_X5]);
+          regs[REG_X4], regs[REG_X5]);
   svcinfo("x6: 0x%-16lx x7: 0x%lx\n",
-          f_regs->regs[REG_X6], f_regs->regs[REG_X7]);
+          regs[REG_X6], regs[REG_X7]);
 }
 
 #ifdef CONFIG_LIB_SYSCALL
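The dump helper now indexes the saved frame as a flat uint64_t array instead of casting it to struct regs_context. Both views address the same memory because the x-registers sit at the front of the frame, indexed by the REG_Xn constants; a simplified sketch (layout and index values are illustrative, the real definitions live in the arm64 irq.h):

    #include <stdint.h>

    #define REG_X0 0                 /* illustrative index values */
    #define REG_X1 1

    struct regs_context              /* simplified: the real frame has more fields */
    {
      uint64_t regs[31];             /* x0..x30 */
      uint64_t sp_elx;
      uint64_t elr;
      uint64_t spsr;
    };

    static int views_agree(uint64_t *frame)
    {
      struct regs_context *f_regs = (struct regs_context *)frame;

      /* The old and the new access read the same slot */

      return f_regs->regs[REG_X1] == frame[REG_X1];
    }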
@@ -145,32 +145,32 @@ uintptr_t dispatch_syscall(unsigned int nbr, uintptr_t parm1,
 #endif
 
 /****************************************************************************
- * Name: arm64_syscall_switch
+ * Name: arm64_syscall
  *
  * Description:
  *   task switch syscall
  *
  ****************************************************************************/
 
-uint64_t *arm64_syscall_switch(uint64_t * regs)
+uint64_t *arm64_syscall(uint64_t *regs)
 {
+  uint64_t *ret_regs = regs;
   uint64_t cmd;
-  struct regs_context *f_regs;
-  uint64_t *ret_regs;
   struct tcb_s *tcb;
   int cpu;
 #ifdef CONFIG_BUILD_KERNEL
   uint64_t spsr;
 #endif
 
   /* Nested interrupts are not supported */
 
   DEBUGASSERT(regs);
 
-  f_regs = (struct regs_context *)regs;
-
   /* The SYSCALL command is in x0 on entry. Parameters follow in x1..x7 */
 
-  cmd = f_regs->regs[REG_X0];
+  cmd = regs[REG_X0];
 
-  arm64_dump_syscall(__func__, cmd, f_regs);
+  arm64_dump_syscall(__func__, cmd, regs);
 
   switch (cmd)
     {
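As the comment says, the command arrives in x0 with parameters in x1..x7. For orientation, the user-side stub that produces this layout looks roughly like NuttX's sys_call1() (a sketch assuming svc #0 is the trap instruction; the real inline stubs are in the arm64 syscall.h):

    #include <stdint.h>

    static inline uintptr_t sys_call1_sketch(unsigned int cmd, uintptr_t parm1)
    {
      register uint64_t x0 __asm__("x0") = cmd;    /* lands in regs[REG_X0] */
      register uint64_t x1 __asm__("x1") = parm1;  /* lands in regs[REG_X1] */

      __asm__ __volatile__("svc #0"                /* traps into arm64_sync_exc */
                           : "+r"(x0)
                           : "r"(x1)
                           : "memory");

      return (uintptr_t)x0;                        /* result comes back in x0 */
    }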
@@ -192,7 +192,7 @@ uint64_t *arm64_syscall_switch(uint64_t * regs)
            * set will determine the restored context.
            */
 
-          ret_regs = (uint64_t *)f_regs->regs[REG_X1];
+          ret_regs = (uint64_t *)regs[REG_X1];
           DEBUGASSERT(ret_regs);
         }
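The hunk above is the SYS_restore_context case: x1 carries the frame of the task to resume, and redirecting ret_regs is all it takes. On the caller side this corresponds to something like the following (hedged sketch reusing sys_call1_sketch() from above; the frame-pointer source is illustrative):

    #include <stdint.h>

    #define SYS_restore_context 1          /* value from the old assembly comment */

    extern uint64_t *g_frame_to_resume;    /* hypothetical: frame saved at switch-out */

    /* Reuses sys_call1_sketch() from the stub sketched earlier */

    static void restore_task_sketch(void)
    {
      /* x0 = SYS_restore_context, x1 = frame to restore.  This never
       * returns here: the handler restores the other task's registers.
       */

      sys_call1_sketch(SYS_restore_context, (uintptr_t)g_frame_to_resume);
    }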
@@ -216,85 +216,13 @@ uint64_t *arm64_syscall_switch(uint64_t * regs)
       case SYS_switch_context:
         {
-          DEBUGASSERT(f_regs->regs[REG_X1] != 0 &&
-                      f_regs->regs[REG_X2] != 0);
-          *(uint64_t **)f_regs->regs[REG_X1] = regs;
+          DEBUGASSERT(regs[REG_X1] != 0 && regs[REG_X2] != 0);
+          *(uint64_t **)regs[REG_X1] = regs;
 
-          ret_regs = (uint64_t *) f_regs->regs[REG_X2];
+          ret_regs = (uint64_t *)regs[REG_X2];
         }
         break;
 
-      default:
-        {
-          svcerr("ERROR: Bad SYS call: 0x%" PRIx64 "\n", cmd);
-          ret_regs = 0;
-          return 0;
-        }
-        break;
-    }
-
-  if ((uint64_t *)f_regs != ret_regs)
-    {
-#ifdef CONFIG_ARCH_ADDRENV
-      /* Make sure that the address environment for the previously
-       * running task is closed down gracefully (data caches dump,
-       * MMU flushed) and set up the address environment for the new
-       * thread at the head of the ready-to-run list.
-       */
-
-      addrenv_switch(NULL);
-#endif
-
-      /* Record the new "running" task. g_running_tasks[] is only used by
-       * assertion logic for reporting crashes.
-       */
-
-      cpu = this_cpu();
-      tcb = current_task(cpu);
-      g_running_tasks[cpu] = tcb;
-
-      /* Restore the cpu lock */
-
-      restore_critical_section(tcb, cpu);
-    }
-
-  return ret_regs;
-}
-
-/****************************************************************************
- * Name: arm64_syscall
- *
- * Description:
- *   SVC interrupts will vector here with insn=the SVC instruction and
- *   xcp=the interrupt context
- *
- *   The handler may get the SVC number be de-referencing the return
- *   address saved in the xcp and decoding the SVC instruction
- *
- ****************************************************************************/
-
-int arm64_syscall(uint64_t *regs)
-{
-  uint64_t cmd;
-  struct regs_context *f_regs;
-#ifdef CONFIG_BUILD_KERNEL
-  uint64_t spsr;
-#endif
-
-  /* Nested interrupts are not supported */
-
-  DEBUGASSERT(regs);
-
-  f_regs = (struct regs_context *)regs;
-
-  /* The SYSCALL command is in x0 on entry. Parameters follow in x1..x7 */
-
-  cmd = f_regs->regs[REG_X0];
-
-  arm64_dump_syscall(__func__, cmd, f_regs);
-
-  switch (cmd)
-    {
 #ifdef CONFIG_BUILD_KERNEL
 
       /* R0=SYS_signal_handler: This a user signal handler callback
        *
@@ -396,11 +324,59 @@ int arm64_syscall(uint64_t *regs)
         break;
 #endif
 
-      default:
-        /* This is not an architecture-specific system call. If NuttX is built
-         * as a standalone kernel with a system call interface, then all of the
-         * additional system calls must be handled as in the default case.
-         */
-
-        DEBUGPANIC();
-        break;
+      default:
+        {
+#ifdef CONFIG_LIB_SYSCALL
+          /* Verify that the SYS call number is within range */
+
+          DEBUGASSERT(cmd >= CONFIG_SYS_RESERVED && cmd < SYS_maxsyscall);
+
+          /* Make sure that there is a no saved SYSCALL return address. We
+           * cannot yet handle nested system calls.
+           */
+
+          regs[REG_X0] = dispatch_syscall(regs[REG_X0], regs[REG_X1],
+                                          regs[REG_X2], regs[REG_X3],
+                                          regs[REG_X4], regs[REG_X5],
+                                          regs[REG_X6]);
+#else
+          svcerr("ERROR: Bad SYS call: 0x%" PRIx64 "\n", cmd);
+#endif
+        }
+        break;
     }
 
-  return 0;
+  if (regs != ret_regs)
+    {
+#ifdef CONFIG_ARCH_ADDRENV
+      /* Make sure that the address environment for the previously
+       * running task is closed down gracefully (data caches dump,
+       * MMU flushed) and set up the address environment for the new
+       * thread at the head of the ready-to-run list.
+       */
+
+      addrenv_switch(NULL);
+#endif
+
+      /* Record the new "running" task. g_running_tasks[] is only used by
+       * assertion logic for reporting crashes.
+       */
+
+      cpu = this_cpu();
+      tcb = current_task(cpu);
+      g_running_tasks[cpu] = tcb;
+
+      /* Restore the cpu lock */
+
+      restore_critical_section(tcb, cpu);
+    }
+
+  return ret_regs;
 }
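Reading the merged function as a whole: SYS_switch_context stores the outgoing frame through the pointer in x1 and restores the frame in x2, and the closing regs != ret_regs block then refreshes g_running_tasks[] and the critical-section state exactly as the deleted arm64_syscall_switch() tail did. The scheduler side of that protocol is roughly as follows (hedged sketch; the two-argument stub mirrors sys_call1_sketch() above, and SYS_switch_context = 2 is taken from the old assembly comment):

    #include <stdint.h>

    #define SYS_switch_context 2

    static inline uintptr_t sys_call2_sketch(unsigned int cmd, uintptr_t parm1,
                                             uintptr_t parm2)
    {
      register uint64_t x0 __asm__("x0") = cmd;
      register uint64_t x1 __asm__("x1") = parm1;   /* &slot for my frame */
      register uint64_t x2 __asm__("x2") = parm2;   /* frame to restore */

      __asm__ __volatile__("svc #0"
                           : "+r"(x0)
                           : "r"(x1), "r"(x2)
                           : "memory");

      return (uintptr_t)x0;
    }

    /* Sketch: suspend the current task and resume 'to'.  Execution resumes
     * here only when another switch restores the frame saved via save_slot.
     */

    static void switch_to_sketch(uint64_t **save_slot, uint64_t *to)
    {
      sys_call2_sketch(SYS_switch_context, (uintptr_t)save_slot, (uintptr_t)to);
    }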


@@ -139,163 +139,32 @@ SECTION_FUNC(text, arm64_jump_to_user)
 GTEXT(arm64_sync_exc)
 SECTION_FUNC(text, arm64_sync_exc)
-    /* checking the EC value to see which exception need to be handle */
+    /* Switch to IRQ stack and save current sp on it. */
+
+#ifdef CONFIG_SMP
+    get_cpu_id x0
+    ldr x1, =(g_cpu_int_stacktop)
+    lsl x0, x0, #3
+    ldr x1, [x1, x0]
+#else
+    ldr x1, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK)
+#endif
+
+    mov x0, sp
+    mov sp, x1
 
 #if CONFIG_ARCH_ARM64_EXCEPTION_LEVEL == 3
-    mrs x0, esr_el3
+    mrs x4, esr_el3
 #else
-    mrs x0, esr_el1
+    mrs x4, esr_el1
 #endif
-    lsr x1, x0, #26
+    lsr x5, x4, #26
 
-#ifdef CONFIG_ARCH_FPU
-    /* fpu trap */
-
-    cmp x1, #0x07 /*Access to SIMD or floating-point */
-    bne 1f
-    mov x0, sp
-
-    /* when the fpu trap is handled */
-
-1:
-#endif
-
-    /* 0x15 = SVC system call */
-
-    cmp x1, #0x15
-
-    /* if this is a svc call ?*/
-
-    bne exc_handle
+    cmp x5, #0x15
+    bne arm64_fatal_handler
 
-#ifdef CONFIG_LIB_SYSCALL
-    /* Handle user system calls separately */
-
-    cmp x0, #CONFIG_SYS_RESERVED
-    blt reserved_syscall
-
-    /* Call dispatch_syscall() on the kernel stack with interrupts enabled */
-
-    mrs x10, spsr_el1
-    and x10, x10, #IRQ_SPSR_MASK
-    cmp x10, xzr
-    bne 1f
-    msr daifclr, #IRQ_DAIF_MASK /* Re-enable interrupts */
-1:
-    bl dispatch_syscall
-    msr daifset, #IRQ_DAIF_MASK /* Disable interrupts */
-
-    /* Save the return value into the user context */
-
-    str x0, [sp, #8 * REG_X0]
-
-    /* Return from exception */
-
-    b arm64_exit_exception
-
-reserved_syscall:
-#endif
-
-    /* x0 = syscall_cmd
-     * if ( x0 <= SYS_switch_context ) {
-     *   call context_switch
-     *   it's a context switch syscall, so context need to be done
-     * }
-     * #define SYS_save_context (0)
-     * #define SYS_restore_context (1)
-     * #define SYS_switch_context (2)
-     */
-
-    ldr x0, [sp, #8 * REG_X0]
-    cmp x0, #SYS_save_context
-    beq save_context
-    cmp x0, #SYS_switch_context
-    beq context_switch
-    cmp x0, #SYS_restore_context
-    beq context_switch
-
-    /* Normal syscall, thread context will not switch
-     *
-     * call the SVC handler with interrupts disabled.
-     * void arm64_syscall(uint64_t *regs)
-     * in:
-     *   regs = pointer to struct reg_context allocating
-     *          from stack, esf_reg has put on it
-     *   regs[REG_X0]: syscall cmd
-     *   regs[REG_X1] ~ regs[REG_X6]: syscall parameter
-     * out:
-     *   x0: return by arm64_syscall
-     */
-
-    mov x0, sp /* x0 = reg frame */
-
-    /* Call arm64_syscall() on the user stack */
-
-    bl arm64_syscall /* Call the handler */
-
-    /* Return from exception */
-
-    b arm64_exit_exception
-
-context_switch:
-
-    /* Call arm64_syscall_switch() for context switch
-     *
-     * uint64_t * arm64_syscall_switch(uint64_t * regs)
-     * out:
-     *   x0: return by arm64_syscall_switch, restore task context
-     *   regs[REG_X1]: save task context, if x1 = 0, only restore x0
-     */
-
-    mov x0, sp
-    str x0, [sp, #-16]!
-    bl arm64_syscall_switch
-
-    /* get save task reg context pointer */
-
-    ldr x1, [sp], #16
-
-#ifdef CONFIG_SMP
-    /* Notes:
-     * Complete any pending TLB or cache maintenance on this CPU in case
-     * the thread migrates to a different CPU.
-     * This full barrier is also required by the membarrier system
-     * call.
-     */
-
-    dsb ish
-#endif
-
-    b arm64_exit_exception
-
-save_context:
-
-    arm64_exception_context_save x0 x1 sp
-    mov x0, sp
-    bl arm64_syscall_save_context
-
-    /* Save the return value into the ESF */
-
-    str x0, [sp, #8 * REG_X0]
-
-    /* Return from exception */
-
-    b arm64_exit_exception
-
-exc_handle:
-    mov x0, sp
-
-    /* void arm64_fatal_handler(struct regs_context * reg);
-     * x0 = Exception stack frame
-     */
-
-    bl arm64_fatal_handler
-
-    /* Return here only in case of recoverable error */
-
-    b arm64_exit_exception
+    bl arm64_syscall /* Call the handler */
+
+    b arm64_exit_exception
 
 /****************************************************************************
  * Name: arm64_irq_handler
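In the new entry sequence the ESR is read into x4 and lsr x5, x4, #26 extracts the exception class: EC occupies ESR bits [31:26], 0x15 is "SVC from AArch64", and 0x07 (the check dropped along with the FPU trap block) is a trapped SIMD/FP access. A C model of the added filter, with the constants transcribed from the code above:

    #include <stdint.h>

    #define ESR_EC_SHIFT 26
    #define EC_FP_SIMD   0x07   /* trapped SIMD/FP access (old lazy-FPU path) */
    #define EC_SVC64     0x15   /* SVC instruction from AArch64 */

    /* Mirrors 'lsr x5, x4, #26' + 'cmp x5, #0x15': anything that is not
     * an SVC now goes straight to arm64_fatal_handler.
     */

    static int is_svc(uint64_t esr)
    {
      return (esr >> ESR_EC_SHIFT) == EC_SVC64;
    }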
@@ -309,20 +178,16 @@ GTEXT(arm64_irq_handler)
 SECTION_FUNC(text, arm64_irq_handler)
     /* Switch to IRQ stack and save current sp on it. */
 
 #ifdef CONFIG_SMP
-    get_cpu_id x1
-    ldr x0, =(g_cpu_int_stacktop)
-    lsl x1, x1, #3
-    ldr x0, [x0, x1]
+    get_cpu_id x0
+    ldr x1, =(g_cpu_int_stacktop)
+    lsl x0, x0, #3
+    ldr x1, [x1, x0]
 #else
-    ldr x0, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK)
+    ldr x1, =(g_interrupt_stack + CONFIG_ARCH_INTERRUPTSTACK)
 #endif
 
-    /* Save the task's stack and switch irq stack */
-
-    mov x1, sp
-    mov sp, x0
-    str x1, [sp, #-16]!
-
-    mov x0, x1 /* x0 = reg frame */
+    mov x0, sp
+    mov sp, x1
 
     /* Call arm64_decodeirq() on the interrupt stack
      * with interrupts disabled
@@ -330,28 +195,15 @@ SECTION_FUNC(text, arm64_irq_handler)
     bl arm64_decodeirq
 
     /* Upon return from arm64_decodeirq, x0 holds the pointer to the
      * call reg context area, which can be use to restore context.
      * This may or may not be the same value that was passed to arm64_decodeirq:
      * It will differ if a context switch is required.
      */
 
-    ldr x1, [sp], #16
-
-#ifdef CONFIG_SMP
-    /* Notes:
-     * Complete any pending TLB or cache maintenance on this CPU in case
-     * the thread migrates to a different CPU.
-     * This full barrier is also required by the membarrier system
-     * call.
-     */
-
-    dsb ish
-#endif
-
-    b arm64_exit_exception
-
-/* TODO: if the arm64_fatal_handler return success, maybe need context switch */
+    b arm64_exit_exception
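The IRQ path now leans on the same convention as the syscall path: arm64_decodeirq() takes the saved frame in x0 and returns the frame to restore, so the old save/pop of the task sp on the interrupt stack becomes unnecessary. A sketch of that contract (restore_and_eret() is the same illustrative stand-in used earlier):

    #include <stdint.h>

    uint64_t *arm64_decodeirq(uint64_t *regs);   /* GIC decode entry point */

    void restore_and_eret(uint64_t *regs);       /* hypothetical eret helper */

    static void irq_model(uint64_t *saved_frame)
    {
      /* A different return value means the ISR made another task ready */

      restore_and_eret(arm64_decodeirq(saved_frame));
    }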
 /****************************************************************************
  * Name: arm64_serror_handler
  *
  * Description:
  *   SError exception handler
  *
  ****************************************************************************/
 
 GTEXT(arm64_serror_handler)
 SECTION_FUNC(text, arm64_serror_handler)
@@ -359,7 +211,15 @@ SECTION_FUNC(text, arm64_serror_handler)
     bl arm64_fatal_handler
 
     /* Return here only in case of recoverable error */
 
     b arm64_exit_exception
 
+/****************************************************************************
+ * Name: arm64_mode32_handler
+ *
+ * Description:
+ *   Mode32 exception handler
+ *
+ ****************************************************************************/
+
 GTEXT(arm64_mode32_handler)
 SECTION_FUNC(text, arm64_mode32_handler)
@@ -369,6 +229,14 @@ SECTION_FUNC(text, arm64_mode32_handler)
     b arm64_exit_exception
 
+/****************************************************************************
+ * Name: arm64_fiq_handler
+ *
+ * Description:
+ *   Interrupt exception handler
+ *
+ ****************************************************************************/
+
 GTEXT(arm64_fiq_handler)
 SECTION_FUNC(text, arm64_fiq_handler)
 #ifndef CONFIG_ARM64_DECODEFIQ
@@ -382,45 +250,22 @@ SECTION_FUNC(text, arm64_fiq_handler)
 #else
     /* Switch to FIQ stack and save current sp on it. */
 
 #ifdef CONFIG_SMP
-    get_cpu_id x1
-    ldr x0, =(g_cpu_int_fiq_stacktop)
-    lsl x1, x1, #3
-    ldr x0, [x0, x1]
+    get_cpu_id x0
+    ldr x1, =(g_cpu_int_fiq_stacktop)
+    lsl x0, x0, #3
+    ldr x1, [x1, x0]
 #else
-    ldr x0, =(g_interrupt_fiq_stack + CONFIG_ARCH_INTERRUPTSTACK)
+    ldr x1, =(g_interrupt_fiq_stack + CONFIG_ARCH_INTERRUPTSTACK)
 #endif
 
-    /* Save the task's stack and switch fiq stack */
-
-    mov x1, sp
-    mov sp, x0
-    str x1, [sp, #-16]!
-
-    mov x0, x1 /* x0 = reg frame */
+    mov x0, sp
+    mov sp, x1
 
-    /* Call arm64_decodefiq() on the interrupt stack
+    /* Call arm64_decodeirq() on the interrupt stack
      * with interrupts disabled
      */
 
-    bl arm64_decodefiq
-
-    /* Upon return from arm64_decodefiq, x0 holds the pointer to the
-     * call reg context area, which can be use to restore context.
-     * This may or may not be the same value that was passed to arm64_decodefiq:
-     * It will differ if a context switch is required.
-     */
-
-    ldr x1, [sp], #16
-
-#ifdef CONFIG_SMP
-    /* Notes:
-     * Complete any pending TLB or cache maintenance on this CPU in case
-     * the thread migrates to a different CPU.
-     * This full barrier is also required by the membarrier system
-     * call.
-     */
-
-    dsb ish
+    bl arm64_decodeirq
 #endif
 
-    b arm64_exit_exception
-#endif
+    b arm64_exit_exception