sched: remove csection and reduce the interrupt disabling time in sched_[un]lock

Reason:
1. Accelerate the implementation of sched_lock(): remove enter_critical_section()
   from sched_lock() and only enter the critical section when task scheduling is
   actually required.  Since sched_lock()/sched_unlock() now return void, callers
   no longer need to check a return value (see the call-site sketch below).
2. Add sched_lock_wo_note()/sched_unlock_wo_note(), which do not perform the
   instrumentation logic.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
Authored by hujun5 on 2025-01-19 15:52:00 +08:00; committed by Xiang Xiao
parent b49f4286fb
commit 914ae532e6
7 changed files with 46 additions and 278 deletions
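
Call-site impact, sketched for clarity: since the new sched_lock()/sched_unlock()
return void and cannot fail, callers drop their error handling.  The snippet below
mirrors the esp_task_create_pinned_to_core() hunk that follows (variable names are
taken from that hunk; the kthread_create() argv argument is abbreviated to NULL and
the old error path is kept only as a comment):

    int pid;

    #ifdef CONFIG_SMP
      /* Previously: ret = sched_lock(); if (ret) { wlerr(...); return false; }
       * sched_lock() now returns void, so the guard reduces to a plain call.
       */

      sched_lock();
    #endif

      pid = kthread_create(name, prio, stack_depth, entry, NULL);

      /* ... work performed while pre-emption is disabled ... */

    #ifdef CONFIG_SMP
      sched_unlock();
    #endif

      return pid > 0;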

@@ -2353,12 +2353,7 @@ static int32_t esp_task_create_pinned_to_core(void *entry,
DEBUGASSERT(task_handle != NULL);
#ifdef CONFIG_SMP
ret = sched_lock();
if (ret)
{
wlerr("Failed to lock scheduler before creating pinned thread\n");
return false;
}
sched_lock();
#endif
pid = kthread_create(name, prio, stack_depth, entry,
@@ -2390,12 +2385,7 @@ static int32_t esp_task_create_pinned_to_core(void *entry,
}
#ifdef CONFIG_SMP
ret = sched_unlock();
if (ret)
{
wlerr("Failed to unlock scheduler after creating pinned thread\n");
return false;
}
sched_unlock();
#endif
return pid > 0;

@@ -110,6 +110,7 @@
#define TCB_FLAG_JOIN_COMPLETED (1 << 13) /* Bit 13: Pthread join completed */
#define TCB_FLAG_FREE_TCB (1 << 14) /* Bit 14: Free tcb after exit */
#define TCB_FLAG_SIGDELIVER (1 << 15) /* Bit 15: Deliver pending signals */
#define TCB_FLAG_PREEMPT_SCHED (1 << 16) /* Bit 16: tcb is PREEMPT_SCHED */
/* Values for struct task_group tg_flags */
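
A note on the new bit: judging from the sched_unlock() hunk later in this commit,
TCB_FLAG_PREEMPT_SCHED appears to act as a re-entrancy guard around the tickless
timer reassessment (this reading is an inference from the diff, not a statement of
the author's intent).  The pattern, as it appears in the hunks below:

    /* Skip the reassessment if it is already in progress for this TCB */

    if ((rtcb->flags & TCB_FLAG_PREEMPT_SCHED) == 0)
      {
        rtcb->flags |= TCB_FLAG_PREEMPT_SCHED;   /* Mark the TCB while ...      */
        nxsched_reassess_timer();                /* ... the timer is reassessed */
        rtcb->flags &= ~TCB_FLAG_PREEMPT_SCHED;  /* Clear the guard again       */
      }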

@@ -265,8 +265,8 @@ int sched_cpucount(FAR const cpu_set_t *set);
/* Task Switching Interfaces (non-standard) */
int sched_lock(void);
int sched_unlock(void);
void sched_lock(void);
void sched_unlock(void);
int sched_lockcount(void);
/* Queries */

@@ -415,6 +415,8 @@ void nxsched_update_critmon(FAR struct tcb_s *tcb);
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state,
FAR void *caller);
#else
# define nxsched_critmon_preemption(t, s, c)
#endif
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0

@@ -64,112 +64,36 @@
*
****************************************************************************/
#ifdef CONFIG_SMP
int sched_lock(void)
void sched_lock(void)
{
FAR struct tcb_s *rtcb;
/* sched_lock() should have no effect if called from the interrupt level. */
/* If the CPU supports suppression of interprocessor interrupts, then
* simple disabling interrupts will provide sufficient protection for
* the following operation.
*/
rtcb = this_task();
/* Check for some special cases: (1) rtcb may be NULL only during early
* boot-up phases, and (2) sched_lock() should have no effect if called
* from the interrupt level.
*/
if (rtcb != NULL && !up_interrupt_context())
if (!up_interrupt_context())
{
irqstate_t flags;
FAR struct tcb_s *rtcb = this_task();
/* Catch attempts to increment the lockcount beyond the range of the
* integer type.
*/
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
DEBUGASSERT(rtcb == NULL || rtcb->lockcount < MAX_LOCK_COUNT);
flags = enter_critical_section();
/* A counter is used to support locking. This allows nested lock
* operations on this thread
*/
rtcb->lockcount++;
/* Check if we just acquired the lock */
if (rtcb->lockcount == 1)
{
/* Note that we have pre-emption locked */
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
nxsched_critmon_preemption(rtcb, true, return_address(0));
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
sched_note_preemption(rtcb, true);
#endif
}
/* Move any tasks in the ready-to-run list to the pending task list
* where they will not be available to run until the scheduler is
* unlocked and nxsched_merge_pending() is called.
*/
nxsched_merge_prioritized(list_readytorun(),
list_pendingtasks(),
TSTATE_TASK_PENDING);
leave_critical_section(flags);
}
return OK;
}
#else /* CONFIG_SMP */
int sched_lock(void)
{
FAR struct tcb_s *rtcb = this_task();
/* Check for some special cases: (1) rtcb may be NULL only during early
* boot-up phases, and (2) sched_lock() should have no effect if called
* from the interrupt level.
*/
if (rtcb != NULL && !up_interrupt_context())
{
/* Catch attempts to increment the lockcount beyond the range of the
* integer type.
*/
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
/* A counter is used to support locking. This allows nested lock
/* A counter is used to support locking. This allows nested lock
* operations on this thread (on any CPU)
*/
rtcb->lockcount++;
/* Check if we just acquired the lock */
if (rtcb->lockcount == 1)
if (rtcb != NULL && rtcb->lockcount++ == 0)
{
#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) || \
defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
irqstate_t flags = enter_critical_section_wo_note();
/* Note that we have pre-emption locked */
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
nxsched_critmon_preemption(rtcb, true, return_address(0));
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
sched_note_preemption(rtcb, true);
leave_critical_section_wo_note(flags);
#endif
}
}
return OK;
}
#endif /* CONFIG_SMP */
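
For readability, here is a best-effort reconstruction of sched_lock() after this
change, assembled from the new lines in the hunk above.  The exact extent of the
preprocessor guards and the placement of leave_critical_section_wo_note() are
educated guesses; nxsched_critmon_preemption() is shown called unconditionally,
relying on the no-op fallback macro added earlier in this commit:

    void sched_lock(void)
    {
      /* sched_lock() should have no effect if called from the interrupt
       * level.
       */

      if (!up_interrupt_context())
        {
          FAR struct tcb_s *rtcb = this_task();

          /* rtcb may be NULL only during early boot-up phases.  Catch
           * attempts to increment the lockcount beyond the range of the
           * integer type.
           */

          DEBUGASSERT(rtcb == NULL || rtcb->lockcount < MAX_LOCK_COUNT);

          /* A counter is used to support locking.  This allows nested
           * lock operations on this thread (on any CPU).  Only the
           * 0 -> 1 transition needs any further work.
           */

          if (rtcb != NULL && rtcb->lockcount++ == 0)
            {
    #if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) || \
        defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
              /* Instrument the 0 -> 1 transition inside a short,
               * notes-free critical section.
               */

              irqstate_t flags = enter_critical_section_wo_note();

              nxsched_critmon_preemption(rtcb, true, return_address(0));
    #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
              sched_note_preemption(rtcb, true);
    #endif
              leave_critical_section_wo_note(flags);
    #endif
            }
        }
    }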

@@ -53,50 +53,30 @@
*
****************************************************************************/
#ifdef CONFIG_SMP
int sched_unlock(void)
void sched_unlock(void)
{
FAR struct tcb_s *rtcb;
/* sched_unlock should have no effect if called from the interrupt level. */
/* This operation is safe because the scheduler is locked and no context
* switch may occur.
*/
rtcb = this_task();
/* Check for some special cases: (1) rtcb may be NULL only during
* early boot-up phases, and (2) sched_unlock() should have no
* effect if called from the interrupt level.
*/
if (rtcb != NULL && !up_interrupt_context())
if (!up_interrupt_context())
{
/* Prevent context switches throughout the following. */
FAR struct tcb_s *rtcb = this_task();
irqstate_t flags = enter_critical_section();
int cpu = this_cpu();
/* rtcb may be NULL only during early boot-up phases */
DEBUGASSERT(rtcb->lockcount > 0);
DEBUGASSERT(rtcb == NULL || rtcb->lockcount > 0);
/* Decrement the preemption lock counter */
rtcb->lockcount--;
/* Check if the lock counter has decremented to zero. If so,
/* Check if the lock counter has decremented to zero. If so,
* then pre-emption has been re-enabled.
*/
if (rtcb->lockcount <= 0)
if (rtcb != NULL && --rtcb->lockcount == 0)
{
irqstate_t flags = enter_critical_section_wo_note();
/* Note that we no longer have pre-emption disabled. */
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
nxsched_critmon_preemption(rtcb, false, return_address(0));
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
sched_note_preemption(rtcb, false);
#endif
/* Release any ready-to-run tasks that have collected in
* g_pendingtasks.
@@ -131,153 +111,25 @@ int sched_unlock(void)
* maximum.
*/
if (rtcb != current_task(cpu))
{
rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
}
#ifdef CONFIG_SCHED_TICKLESS
else
{
nxsched_reassess_timer();
}
#endif
}
#endif
#ifdef CONFIG_SCHED_SPORADIC
#if CONFIG_RR_INTERVAL > 0
else
#endif
/* If (1) the task that was running supported sporadic scheduling
* and (2) if its budget slice has already expired, but (3) it
* could not slice out because pre-emption was disabled, then we
* need to swap the task out now and reassess the interval timer
* for the next time slice.
*/
if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
&& rtcb->timeslice < 0)
{
/* Yes.. that is the situation. Force the low-priority state
* now
*/
nxsched_sporadic_lowpriority(rtcb);
#ifdef CONFIG_SCHED_TICKLESS
/* Make sure that the call to nxsched_merge_pending() did not
* change the currently active task.
*/
if (rtcb == current_task(cpu))
{
nxsched_reassess_timer();
}
#endif
}
#endif
}
UNUSED(cpu);
leave_critical_section(flags);
}
return OK;
}
#else /* CONFIG_SMP */
int sched_unlock(void)
{
FAR struct tcb_s *rtcb = this_task();
/* Check for some special cases: (1) rtcb may be NULL only during
* early boot-up phases, and (2) sched_unlock() should have no
* effect if called from the interrupt level.
*/
if (rtcb != NULL && !up_interrupt_context())
{
/* Prevent context switches throughout the following. */
irqstate_t flags = enter_critical_section();
DEBUGASSERT(rtcb->lockcount > 0);
/* Decrement the preemption lock counter */
rtcb->lockcount--;
/* Check if the lock counter has decremented to zero. If so,
* then pre-emption has been re-enabled.
*/
if (rtcb->lockcount <= 0)
{
/* Note that we no longer have pre-emption disabled. */
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
nxsched_critmon_preemption(rtcb, false, return_address(0));
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
sched_note_preemption(rtcb, false);
#endif
/* Release any ready-to-run tasks that have collected in
* g_pendingtasks.
*
* NOTE: This operation has a very high likelihood of causing
* this task to be switched out!
*
* In the single CPU case, decrementing lockcount to zero is
* sufficient to release the pending tasks. Further, in that
* configuration, critical sections and pre-emption can operate
* fully independently.
*/
if (list_pendingtasks()->head != NULL)
{
if (nxsched_merge_pending())
{
up_switch_context(this_task(), rtcb);
}
}
#if CONFIG_RR_INTERVAL > 0
/* If (1) the task that was running supported round-robin
* scheduling and (2) if its time slice has already expired, but
* (3) it could not be sliced out because pre-emption was disabled,
* then we need to swap the task out now and reassess the interval
* timer for the next time slice.
*/
if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
rtcb->timeslice == 0)
{
/* Yes.. that is the situation. But one more thing: The call
* to nxsched_merge_pending() above may have actually replaced
* the task at the head of the ready-to-run list. In that
* case, we need only to reset the timeslice value back to the
* maximum.
*/
if (rtcb != this_task())
{
rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
}
#ifdef CONFIG_SCHED_TICKLESS
else
# ifdef CONFIG_SCHED_TICKLESS
else if ((rtcb->flags & TCB_FLAG_PREEMPT_SCHED) == 0)
{
rtcb->flags |= TCB_FLAG_PREEMPT_SCHED;
nxsched_reassess_timer();
rtcb->flags &= ~TCB_FLAG_PREEMPT_SCHED;
}
#endif
# endif
}
#endif
#ifdef CONFIG_SCHED_SPORADIC
#if CONFIG_RR_INTERVAL > 0
# if CONFIG_RR_INTERVAL > 0
else
#endif
# endif
/* If (1) the task that was running supported sporadic scheduling
* and (2) if its budget slice has already expired, but (3) it
* could not slice out because pre-emption was disabled, then we
@@ -294,24 +146,23 @@ int sched_unlock(void)
nxsched_sporadic_lowpriority(rtcb);
#ifdef CONFIG_SCHED_TICKLESS
# ifdef CONFIG_SCHED_TICKLESS
/* Make sure that the call to nxsched_merge_pending() did not
* change the currently active task.
*/
if (rtcb == this_task())
if (rtcb == this_task() &&
(rtcb->flags & TCB_FLAG_PREEMPT_SCHED) == 0)
{
rtcb->flags |= TCB_FLAG_PREEMPT_SCHED;
nxsched_reassess_timer();
rtcb->flags &= ~TCB_FLAG_PREEMPT_SCHED;
}
#endif
# endif
}
#endif
leave_critical_section_wo_note(flags);
}
leave_critical_section(flags);
}
return OK;
}
#endif /* CONFIG_SMP */
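
Similarly, a condensed sketch of sched_unlock() after this change, assembled from
the new lines in the hunk above; the round-robin and sporadic timeslice branches
are summarized in a comment rather than reproduced in full:

    void sched_unlock(void)
    {
      /* sched_unlock() should have no effect if called from the interrupt
       * level.
       */

      if (!up_interrupt_context())
        {
          FAR struct tcb_s *rtcb = this_task();

          /* rtcb may be NULL only during early boot-up phases */

          DEBUGASSERT(rtcb == NULL || rtcb->lockcount > 0);

          /* Nested unlocks just decrement the counter; only the final
           * 1 -> 0 transition pays for a (notes-free) critical section.
           */

          if (rtcb != NULL && --rtcb->lockcount == 0)
            {
              irqstate_t flags = enter_critical_section_wo_note();

              nxsched_critmon_preemption(rtcb, false, return_address(0));
    #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
              sched_note_preemption(rtcb, false);
    #endif

              /* Release any ready-to-run tasks that have collected in
               * g_pendingtasks.  This may switch this task out.
               */

              if (list_pendingtasks()->head != NULL &&
                  nxsched_merge_pending())
                {
                  up_switch_context(this_task(), rtcb);
                }

              /* Round-robin and sporadic timeslice handling follows here
               * as in the hunk above, with nxsched_reassess_timer()
               * wrapped in the TCB_FLAG_PREEMPT_SCHED guard.
               */

              leave_critical_section_wo_note(flags);
            }
        }
    }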

@@ -140,13 +140,13 @@
"sched_getcpu","sched.h","","int"
"sched_getparam","sched.h","","int","pid_t","FAR struct sched_param *"
"sched_getscheduler","sched.h","","int","pid_t"
"sched_lock","sched.h","","int"
"sched_lock","sched.h","","void"
"sched_lockcount","sched.h","","int"
"sched_rr_get_interval","sched.h","","int","pid_t","struct timespec *"
"sched_setaffinity","sched.h","defined(CONFIG_SMP)","int","pid_t","size_t","FAR const cpu_set_t*"
"sched_setparam","sched.h","","int","pid_t","const struct sched_param *"
"sched_setscheduler","sched.h","","int","pid_t","int","const struct sched_param *"
"sched_unlock","sched.h","","int"
"sched_unlock","sched.h","","void"
"sched_yield","sched.h","","int"
"select","sys/select.h","","int","int","FAR fd_set *","FAR fd_set *","FAR fd_set *","FAR struct timeval *"
"send","sys/socket.h","defined(CONFIG_NET)","ssize_t","int","FAR const void *","size_t","int"
