sched/sched: Fix some typos in comments.

Signed-off-by: Abdelatif Guettouche <abdelatif.guettouche@espressif.com>
Abdelatif Guettouche 2021-11-18 18:45:00 +01:00 committed by Xiang Xiao
parent 6fde1945a7
commit bdc157f443
5 changed files with 10 additions and 14 deletions


@@ -143,7 +143,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* - The caller has already removed the input rtcb from whatever list it
* was in.
* - The caller handles the condition that occurs if the head of the
- * ready-to-run list is changed.
+ * ready-to-run list has changed.
*
****************************************************************************/
@@ -162,7 +162,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
if ((btcb->flags & TCB_FLAG_CPU_LOCKED) != 0)
{
- /* Yes.. that that is the CPU we must use */
+ /* Yes.. that is the CPU we must use */
cpu = btcb->cpu;
}
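
For context, the hunk above is from the SMP path that decides which CPU a task being added to the ready-to-run list should run on. The following is only a rough, self-contained sketch of that decision, not the NuttX implementation: the toy_* names, the simplified flag and affinity fields, and the constants are stand-ins, and toy_select_cpu() is a trivial placeholder for the real affinity-based selector, nxsched_select_cpu(); a fuller sketch of that selector follows the nxsched_select_cpu() hunk below.

/* Toy model, not NuttX code: choose a CPU for a task about to be added to
 * the ready-to-run list.  A task pinned with the CPU-locked flag must use
 * its own CPU; anything else goes to the affinity-based selector.
 */

#include <stdint.h>

#define TOY_FLAG_CPU_LOCKED (1 << 0)  /* Stand-in for TCB_FLAG_CPU_LOCKED */
#define TOY_NCPUS           4         /* Stand-in for CONFIG_SMP_NCPUS */

struct toy_tcb
{
  uint8_t  flags;     /* Task flag bits */
  uint8_t  cpu;       /* CPU this task is locked to, if any */
  uint32_t affinity;  /* Bit mask of CPUs the task may run on */
};

static int toy_select_cpu(uint32_t affinity)
{
  /* Trivial placeholder for nxsched_select_cpu(): take the first CPU that
   * the affinity mask permits.
   */

  int i;

  for (i = 0; i < TOY_NCPUS; i++)
    {
      if ((affinity & (1u << i)) != 0)
        {
          return i;
        }
    }

  return -1;
}

static int toy_pick_cpu(const struct toy_tcb *btcb)
{
  if ((btcb->flags & TOY_FLAG_CPU_LOCKED) != 0)
    {
      /* Yes.. that is the CPU we must use */

      return btcb->cpu;
    }

  /* Otherwise let the selector pick any CPU the affinity mask allows */

  return toy_select_cpu(btcb->affinity);
}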


@@ -67,23 +67,19 @@ int nxsched_select_cpu(cpu_set_t affinity)
int cpu;
int i;
/* Otherwise, find the CPU that is executing the lowest priority task
* (possibly its IDLE task).
*/
minprio = SCHED_PRIORITY_MAX;
cpu = IMPOSSIBLE_CPU;
for (i = 0; i < CONFIG_SMP_NCPUS; i++)
{
- /* If the thread permitted to run on this CPU? */
+ /* Is the thread permitted to run on this CPU? */
if ((affinity & (1 << i)) != 0)
{
FAR struct tcb_s *rtcb = (FAR struct tcb_s *)
g_assignedtasks[i].head;
- /* If this thread is executing its IDLE task, the use it. The
+ /* If this CPU is executing its IDLE task, then use it. The
* IDLE task is always the last task in the assigned task list.
*/
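
The comments fixed above describe the selection loop in nxsched_select_cpu(). As a hedged, self-contained illustration, the same idea can be modeled with a per-CPU priority array instead of walking the heads of g_assignedtasks[]; the toy_* names and constants below are stand-ins, not NuttX identifiers, and -1 plays the role of IMPOSSIBLE_CPU.

/* Toy model, not the actual nxsched_select_cpu(): pick the CPU that is
 * currently running the lowest-priority task among the CPUs permitted by
 * the affinity mask.  A CPU running its IDLE task is taken immediately.
 */

#include <stdint.h>

#define TOY_NCPUS        4     /* Stand-in for CONFIG_SMP_NCPUS */
#define TOY_PRIORITY_MAX 255   /* Stand-in for SCHED_PRIORITY_MAX */
#define TOY_IDLE_PRIO    0     /* Priority of each CPU's IDLE task */

static uint8_t g_running_prio[TOY_NCPUS];  /* Priority of the task running on each CPU */

static int toy_select_cpu(uint32_t affinity)
{
  int minprio = TOY_PRIORITY_MAX + 1;
  int cpu     = -1;
  int i;

  for (i = 0; i < TOY_NCPUS; i++)
    {
      /* Is the thread permitted to run on this CPU? */

      if ((affinity & (1u << i)) != 0)
        {
          /* If this CPU is executing its IDLE task, then use it */

          if (g_running_prio[i] == TOY_IDLE_PRIO)
            {
              return i;
            }

          /* Otherwise remember the CPU running the lowest-priority task */

          if (g_running_prio[i] < minprio)
            {
              minprio = g_running_prio[i];
              cpu     = i;
            }
        }
    }

  return cpu;
}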


@@ -85,10 +85,10 @@
* This assures that the scheduler does enforce the critical section.
* NOTE: Because of this spinlock, there should never be more than one
* bit set in 'g_cpu_lockset'; attempts to set additional bits should
- * be cause the CPU to block on the spinlock. However, additional bits
+ * cause the CPU to block on the spinlock. However, additional bits
* could get set in 'g_cpu_lockset' due to the context switches on the
* various CPUs.
- * 5. Each the time the head of a g_assignedtasks[] list changes and the
+ * 5. Each time the head of a g_assignedtasks[] list changes and the
* scheduler modifies 'g_cpu_lockset', it must also set 'g_cpu_schedlock'
* depending on the new state of 'g_cpu_lockset'.
* 5. Logic that currently uses the currently running tasks lockcount
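
The item corrected above describes an invariant: g_cpu_schedlock must be held exactly when at least one bit is set in g_cpu_lockset, and released only when the last bit is cleared. The toy model below only illustrates that coupling with plain variables; it is not NuttX code, and it omits the guard spinlock and the atomic bitset helpers the real scheduler uses to keep the two in step.

/* Toy model of the invariant: the scheduler lock is "held" whenever any
 * CPU's bit is set in the lock set, and dropped when the set is empty.
 */

#include <stdbool.h>
#include <stdint.h>

static uint8_t g_toy_lockset;    /* One bit per CPU holding the sched lock */
static bool    g_toy_schedlock;  /* Stand-in for the g_cpu_schedlock spinlock */

static void toy_set_lockbit(int cpu)
{
  g_toy_lockset  |= (uint8_t)(1 << cpu);
  g_toy_schedlock = true;                 /* At least one bit set: locked */
}

static void toy_clear_lockbit(int cpu)
{
  g_toy_lockset &= (uint8_t)~(1 << cpu);
  if (g_toy_lockset == 0)
    {
      g_toy_schedlock = false;            /* Last bit cleared: unlocked */
    }
}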


@@ -39,7 +39,7 @@
* Name: nxsched_merge_prioritized
*
* Description:
- * This function merges the content of the prioritized task list '1ist1'
+ * This function merges the content of the prioritized task list 'list1'
* into the prioritized task list, 'list2'. On return 'list2' will contain
* the prioritized content of both lists; 'list1' will be empty.
*
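
The description above is of nxsched_merge_prioritized(), which merges one priority-ordered list into another and leaves the source empty. Below is a self-contained sketch of that kind of merge on a simplified singly linked list; the real NuttX lists are doubly linked dq_queue_t lists of TCBs ordered by sched_priority, and the real function has additional bookkeeping this toy version glosses over.

/* Toy model, not the NuttX implementation: merge the priority-ordered
 * list 'list1' into 'list2' so that 'list2' stays ordered from highest
 * to lowest priority and 'list1' is empty on return.
 */

#include <stddef.h>
#include <stdint.h>

struct toy_node
{
  struct toy_node *next;
  uint8_t prio;                        /* Higher value = higher priority */
};

struct toy_list
{
  struct toy_node *head;
};

static void toy_merge_prioritized(struct toy_list *list1, struct toy_list *list2)
{
  struct toy_node *a    = list1->head;  /* Source */
  struct toy_node *b    = list2->head;  /* Destination */
  struct toy_node dummy = { NULL, 0 };
  struct toy_node *tail = &dummy;

  /* Standard merge of two sorted lists, highest priority first */

  while (a != NULL && b != NULL)
    {
      if (a->prio > b->prio)
        {
          tail->next = a;
          a          = a->next;
        }
      else
        {
          tail->next = b;
          b          = b->next;
        }

      tail = tail->next;
    }

  tail->next  = (a != NULL) ? a : b;    /* Append whatever remains */
  list2->head = dummy.next;
  list1->head = NULL;                   /* 'list1' will be empty */
}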


@@ -263,7 +263,7 @@ int sched_unlock(void)
* NOTE: This operation has a very high likelihood of causing
* this task to be switched out!
*
- * In the single CPU case, decrementing irqcount to zero is
+ * In the single CPU case, decrementing lockcount to zero is
* sufficient to release the pending tasks. Further, in that
* configuration, critical sections and pre-emption can operate
* fully independently.
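
The single-CPU behaviour described in the hunk above reduces to a small pattern: pre-emption is re-enabled only when the running task's lock count drops back to zero, and that is the point at which tasks made ready while the lock was held are released. The sketch below is a hedged toy model, not sched_unlock() itself; toy_release_pending() is an empty stub standing in for up_release_pending(), and the real code also checks that the pending task list is non-empty before releasing anything.

/* Toy model: nested sched_lock() calls are undone one at a time, and only
 * the final unlock releases pending tasks.
 */

#include <stdint.h>

struct toy_task
{
  uint16_t lockcount;  /* Nesting depth of sched_lock() calls */
};

static void toy_release_pending(void)
{
  /* Stub standing in for up_release_pending(): in NuttX this moves the
   * pending tasks to the ready-to-run list and may switch the caller out.
   */
}

static void toy_sched_unlock(struct toy_task *rtcb)
{
  if (rtcb->lockcount > 0)
    {
      rtcb->lockcount--;

      if (rtcb->lockcount == 0)
        {
          /* The last nested lock was released: let pending tasks run.
           * This may cause the calling task to be switched out.
           */

          toy_release_pending();
        }
    }
}
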
@@ -277,7 +277,7 @@ int sched_unlock(void)
#if CONFIG_RR_INTERVAL > 0
/* If (1) the task that was running supported round-robin
* scheduling and (2) if its time slice has already expired, but
- * (3) it could not slice out because pre-emption was disabled,
+ * (3) it could not be sliced out because pre-emption was disabled,
* then we need to swap the task out now and reassess the interval
* timer for the next time slice.
*/
@@ -285,7 +285,7 @@ int sched_unlock(void)
if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
rtcb->timeslice == 0)
{
- /* Yes.. that is the situation. But one more thing. The call
+ /* Yes.. that is the situation. But one more thing: The call
* to up_release_pending() above may have actually replaced
* the task at the head of the ready-to-run list. In that
* case, we need only to reset the timeslice value back to the