sched: Change SMP list "g_assignedtasks" to a vector
Since g_assignedtasks only holds the running task for each CPU, it can be a plain per-CPU vector of TCB pointers. The idle tasks are already preserved in the statically allocated "g_idletcb" structures and can be referenced from there.

Signed-off-by: Jukka Laitinen <jukka.laitinen@tii.ae>
This commit is contained in: parent 142f32121a, commit 4cc384757b
4 changed files with 30 additions and 107 deletions
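In essence, the per-CPU assigned-task queue, whose head was the running task and whose tail was the CPU's idle task, becomes a plain per-CPU pointer to the running TCB, and the idle TCBs are taken from the statically allocated g_idletcb[] array. The following minimal, self-contained sketch (not NuttX source) illustrates that shape; the names g_assignedtasks, g_idletcb and current_task() mirror the diff below, while the two-CPU setup, the reduced struct tcb_s and the worker task are illustrative assumptions only.

/* Simplified sketch of the data-structure change; see the real diff below.
 * Before: g_assignedtasks[cpu] was a prioritized dq_queue_t whose head was
 * the running task.  After: it is just a pointer to the running TCB.
 */

#include <stdio.h>

#define NCPUS 2                          /* stand-in for CONFIG_SMP_NCPUS */

struct tcb_s                             /* minimal stand-in for the real TCB */
{
  const char *name;
};

static struct tcb_s g_idletcb[NCPUS] =   /* statically allocated idle TCBs */
{
  { "idle0" },
  { "idle1" }
};

static struct tcb_s *g_assignedtasks[NCPUS];  /* one running task per CPU */

#define current_task(cpu) (g_assignedtasks[cpu])

int main(void)
{
  struct tcb_s worker = { "worker" };    /* hypothetical user task */
  int cpu;

  /* Boot: every CPU starts out running its idle task from g_idletcb[] */

  for (cpu = 0; cpu < NCPUS; cpu++)
    {
      g_assignedtasks[cpu] = &g_idletcb[cpu];
    }

  /* "Switching" CPU 1 to the worker is now a single pointer assignment
   * instead of the old dq_remfirst()/dq_addfirst() queue manipulation.
   */

  g_assignedtasks[1] = &worker;

  for (cpu = 0; cpu < NCPUS; cpu++)
    {
      printf("CPU%d runs %s\n", cpu, current_task(cpu)->name);
    }

  return 0;
}

This also shows why the diff can drop the TSTATE_TASK_ASSIGNED/TSTATE_TASK_RUNNING task lists and the list_assignedtasks() macro: the running task no longer lives on any queue.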
@@ -101,31 +101,19 @@ dq_queue_t g_readytorun;
  * and
  * - Tasks/threads that have not been assigned to a CPU.
  *
- * Otherwise, the TCB will be retained in an assigned task list,
- * g_assignedtasks. As its name suggests, on 'g_assignedtasks queue for CPU
- * 'n' would contain only tasks/threads that are assigned to CPU 'n'. Tasks/
+ * Otherwise, the running TCB will be retained in g_assignedtasks vector.
+ * As its name suggests, on 'g_assignedtasks vector for CPU
+ * 'n' would contain the task/thread which is assigned to CPU 'n'. Tasks/
  * threads would be assigned a particular CPU by one of two mechanisms:
  *
  * - (Semi-)permanently through an RTOS interfaces such as
  *   pthread_attr_setaffinity(), or
  * - Temporarily through scheduling logic when a previously unassigned task
  *   is made to run.
- *
- * Tasks/threads that are assigned to a CPU via an interface like
- * pthread_attr_setaffinity() would never go into the g_readytorun list, but
- * would only go into the g_assignedtasks[n] list for the CPU 'n' to which
- * the thread has been assigned. Hence, the g_readytorun list would hold
- * only unassigned tasks/threads.
- *
- * Like the g_readytorun list in in non-SMP case, each g_assignedtask[] list
- * is prioritized: The head of the list is the currently active task on this
- * CPU. Tasks after the active task are ready-to-run and assigned to this
- * CPU. The tail of this assigned task list, the lowest priority task, is
- * always the CPU's IDLE task.
  */

 #ifdef CONFIG_SMP
-dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
+FAR struct tcb_s *g_assignedtasks[CONFIG_SMP_NCPUS];
 enum task_deliver_e g_delivertasks[CONFIG_SMP_NCPUS];
 #endif

@@ -199,10 +187,6 @@ struct tasklist_s g_tasklisttable[NUM_TASK_STATES];

 volatile uint8_t g_nx_initstate;  /* See enum nx_initstate_e */

-/****************************************************************************
- * Private Data
- ****************************************************************************/
-
 /* This is an array of task control block (TCB) for the IDLE thread of each
  * CPU. For the non-SMP case, this is a a single TCB; For the SMP case,
  * there is one TCB per CPU. NOTE: The system boots on CPU0 into the IDLE

@@ -211,7 +195,11 @@ volatile uint8_t g_nx_initstate; /* See enum nx_initstate_e */
  * bringing up the rest of the system.
  */

-static struct tcb_s g_idletcb[CONFIG_SMP_NCPUS];
+struct tcb_s g_idletcb[CONFIG_SMP_NCPUS];

+/****************************************************************************
+ * Private Data
+ ****************************************************************************/
+
 /* This is the name of the idle task */

@@ -251,19 +239,6 @@ static void tasklist_initialize(void)
   tlist[TSTATE_TASK_READYTORUN].list = list_readytorun();
   tlist[TSTATE_TASK_READYTORUN].attr = TLIST_ATTR_PRIORITIZED;

-  /* TSTATE_TASK_ASSIGNED */
-
-  tlist[TSTATE_TASK_ASSIGNED].list = list_assignedtasks(0);
-  tlist[TSTATE_TASK_ASSIGNED].attr = TLIST_ATTR_PRIORITIZED |
-                                     TLIST_ATTR_INDEXED |
-                                     TLIST_ATTR_RUNNABLE;
-
-  /* TSTATE_TASK_RUNNING */
-
-  tlist[TSTATE_TASK_RUNNING].list = list_assignedtasks(0);
-  tlist[TSTATE_TASK_RUNNING].attr = TLIST_ATTR_PRIORITIZED |
-                                    TLIST_ATTR_INDEXED |
-                                    TLIST_ATTR_RUNNABLE;
 #else

   /* TSTATE_TASK_PENDING */

@@ -346,7 +321,6 @@ static void tasklist_initialize(void)
 static void idle_task_initialize(void)
 {
   FAR struct tcb_s *tcb;
-  FAR dq_queue_t *tasklist;
   int i;

   memset(g_idletcb, 0, sizeof(g_idletcb));

@@ -422,11 +396,10 @@ static void idle_task_initialize(void)
       */

 #ifdef CONFIG_SMP
-      tasklist = TLIST_HEAD(tcb, i);
+      g_assignedtasks[i] = tcb;
 #else
-      tasklist = TLIST_HEAD(tcb);
+      dq_addfirst((FAR dq_entry_t *)tcb, TLIST_HEAD(tcb));
 #endif
-      dq_addfirst((FAR dq_entry_t *)tcb, tasklist);

       /* Mark the idle task as the running task */

@@ -60,7 +60,6 @@
 #define list_waitingforfill()   (&g_waitingforfill)
 #define list_stoppedtasks()     (&g_stoppedtasks)
 #define list_inactivetasks()    (&g_inactivetasks)
-#define list_assignedtasks(cpu) (&g_assignedtasks[cpu])

 /* These are macros to access the current CPU and the current task on a CPU.
  * These macros are intended to support a future SMP implementation.

@@ -68,7 +67,7 @@
  */

 #ifdef CONFIG_SMP
-# define current_task(cpu) ((FAR struct tcb_s *)list_assignedtasks(cpu)->head)
+# define current_task(cpu) (g_assignedtasks[cpu])
 #else
 # define current_task(cpu) ((FAR struct tcb_s *)list_readytorun()->head)
 #endif

@@ -176,36 +175,30 @@ extern dq_queue_t g_readytorun;
  * and
  * - Tasks/threads that have not been assigned to a CPU.
  *
- * Otherwise, the TCB will be retained in an assigned task list,
- * g_assignedtasks. As its name suggests, on 'g_assignedtasks queue for CPU
- * 'n' would contain only tasks/threads that are assigned to CPU 'n'. Tasks/
- * threads would be assigned a particular CPU by one of two mechanisms:
+ * Otherwise, the TCB will be retained in an assigned task vector,
+ * g_assignedtasks. As its name suggests, on 'g_assignedtasks vector for CPU
+ * 'n' would contain only the task/thread which is running on the CPU 'n'.
+ * Tasks/threads would be assigned a particular CPU by one of two
+ * mechanisms:
  *
  * - (Semi-)permanently through an RTOS interfaces such as
  *   pthread_attr_setaffinity(), or
  * - Temporarily through scheduling logic when a previously unassigned task
  *   is made to run.
- *
- * Tasks/threads that are assigned to a CPU via an interface like
- * pthread_attr_setaffinity() would never go into the g_readytorun list, but
- * would only go into the g_assignedtasks[n] list for the CPU 'n' to which
- * the thread has been assigned. Hence, the g_readytorun list would hold
- * only unassigned tasks/threads.
- *
- * Like the g_readytorun list in in non-SMP case, each g_assignedtask[] list
- * is prioritized: The head of the list is the currently active task on this
- * CPU. Tasks after the active task are ready-to-run and assigned to this
- * CPU. The tail of this assigned task list, the lowest priority task, is
- * always the CPU's IDLE task.
  */

-extern dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
+extern FAR struct tcb_s *g_assignedtasks[CONFIG_SMP_NCPUS];

 /* g_delivertasks is used to indicate that a task switch is scheduled for
  * another cpu to be processed.
  */

 extern enum task_deliver_e g_delivertasks[CONFIG_SMP_NCPUS];

+/* This is the list of idle tasks */
+
+extern FAR struct tcb_s g_idletcb[CONFIG_SMP_NCPUS];
+
 #endif

 /* This is the list of all tasks that are ready-to-run, but cannot be placed

@@ -181,33 +181,23 @@ bool nxsched_switch_running(int cpu, bool switch_equal)
   if (CPU_ISSET(cpu, &btcb->affinity) &&
       ((btcb->flags & TCB_FLAG_CPU_LOCKED) == 0 || btcb->cpu == cpu))
     {
-      FAR dq_queue_t *tasklist = list_assignedtasks(cpu);
-
       /* Found a task, remove it from ready-to-run list */

       dq_rem((FAR struct dq_entry_s *)btcb, list_readytorun());

-      /* Remove the current task from assigned tasks list and put it
-       * to the ready-to-run. But leave idle task.
-       */
-
       if (!is_idle_task(rtcb))
         {
-          dq_remfirst(tasklist);
+          /* Put currently running task back to ready-to-run list */

           rtcb->task_state = TSTATE_TASK_READYTORUN;
           nxsched_add_prioritized(rtcb, list_readytorun());
-
-          /* We should now have only the idle task assigned */
-
-          DEBUGASSERT(
-            is_idle_task((FAR struct tcb_s *)dq_peek(tasklist)));
         }
       else
         {
           rtcb->task_state = TSTATE_TASK_ASSIGNED;
         }

-      dq_addfirst((FAR dq_entry_t *)btcb, tasklist);
+      g_assignedtasks[cpu] = btcb;
       up_update_task(btcb);

       btcb->cpu = cpu;

@@ -137,7 +137,6 @@ void nxsched_remove_self(FAR struct tcb_s *tcb)
 #ifdef CONFIG_SMP
 static void nxsched_remove_running(FAR struct tcb_s *tcb)
 {
-  FAR dq_queue_t *tasklist;
   FAR struct tcb_s *nxttcb;
   int cpu;

@@ -149,40 +148,10 @@ static void nxsched_remove_running(FAR struct tcb_s *tcb)
               tcb->task_state == TSTATE_TASK_RUNNING);

   cpu = tcb->cpu;
-  tasklist = &g_assignedtasks[cpu];

-  /* Check if the TCB to be removed is at the head of a running list.
-   * For the case of SMP, there are two lists involved: (1) the
-   * g_readytorun list that holds non-running tasks that have not been
-   * assigned to a CPU, and (2) and the g_assignedtasks[] lists which hold
-   * tasks assigned a CPU, including the task that is currently running on
-   * that CPU. Only this latter list contains the currently active task
-   * only removing the head of that list can result in a context switch.
-   *
-   * tcb->blink == NULL will tell us if the TCB is at the head of the
-   * running list and, hence, a candidate for the new running task.
-   *
-   * If so, then the tasklist RUNNABLE attribute will inform us if the list
-   * holds the currently executing task and, hence, if a context switch
-   * should occur.
-   */
+  /* Next task will be the idle task */

-  DEBUGASSERT(tcb->blink == NULL);
-  DEBUGASSERT(TLIST_ISRUNNABLE(tcb->task_state));
-
-  /* There must always be at least one task in the list (the IDLE task)
-   * after the TCB being removed.
-   */
-
-  nxttcb = tcb->flink;
-  DEBUGASSERT(nxttcb != NULL && is_idle_task(nxttcb));
-
-  /* The task is running but the CPU that it was running on has been
-   * paused. We can now safely remove its TCB from the running
-   * task list.
-   */
-
-  dq_remfirst(tasklist);
+  nxttcb = &g_idletcb[cpu];

   /* Since the TCB is no longer in any list, it is now invalid */

@@ -191,6 +160,7 @@ static void nxsched_remove_running(FAR struct tcb_s *tcb)
   /* Activate the idle task */

   nxttcb->task_state = TSTATE_TASK_RUNNING;
+  g_assignedtasks[cpu] = nxttcb;
   up_update_task(nxttcb);
 }

@@ -213,10 +183,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb)

   tasklist = TLIST_HEAD(tcb, tcb->cpu);

-  /* The task is not running. Just remove its TCB from the task
-   * list. In the SMP case this may be either the g_readytorun() or the
-   * g_assignedtasks[cpu] list.
-   */
+  /* The task is not running. Just remove its TCB from the task list */

   dq_rem((FAR dq_entry_t *)tcb, tasklist);