irq: irq_cpu_locked is always called from within the critical section, and its parameter is always the current CPU id, so it must return false. We can therefore safely delete it.
Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in:
parent
1b068b0d4b
commit
82acf6e6a7
6 changed files with 5 additions and 108 deletions
|
|
@ -150,33 +150,6 @@ void irq_initialize(void);
|
|||
|
||||
int irq_unexpected_isr(int irq, FAR void *context, FAR void *arg);
|
||||
|
||||
/****************************************************************************
 * Name: irq_cpu_locked
 *
 * Description:
 *   Test if the IRQ lock is set OR if this CPU holds the IRQ lock.
 *   There is an interaction with pre-emption controls and IRQ locking:
 *   Even if pre-emption is enabled, tasks will be forced to pend if
 *   the IRQ lock is also set UNLESS the CPU starting the task is the
 *   holder of the IRQ lock.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to check against the IRQ lock holder.
 *
 * Returned Value:
 *   true  - IRQs are locked by a different CPU.
 *   false - IRQs are unlocked OR they are locked BUT this CPU
 *           is the holder of the lock.
 *
 *   Warning: These values are volatile and only valid at the instant
 *   that the CPU set was queried.
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
bool irq_cpu_locked(int cpu);
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: irq_foreach
|
||||
*
|
||||
|
|
|
|||
|
|
@ -608,80 +608,6 @@ void leave_critical_section(irqstate_t flags)
|
|||
}
|
||||
#endif
|
||||
|
||||
/****************************************************************************
 * Name: irq_cpu_locked
 *
 * Description:
 *   Test if the IRQ lock is set OR if this CPU holds the IRQ lock.
 *   There is an interaction with pre-emption controls and IRQ locking:
 *   Even if pre-emption is enabled, tasks will be forced to pend if
 *   the IRQ lock is also set UNLESS the CPU starting the task is the
 *   holder of the IRQ lock.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to check against the IRQ lock holder.
 *
 * Returned Value:
 *   true  - IRQs are locked by a different CPU.
 *   false - IRQs are unlocked OR they are locked BUT this CPU
 *           is the holder of the lock.
 *
 *   Warning: These values are volatile and only valid at the instant
 *   that the CPU set was queried.
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
bool irq_cpu_locked(int cpu)
{
  cpu_set_t irqset;

  /* g_cpu_irqset is not valid in early phases of initialization */

  if (nxsched_get_initstate() < OSINIT_OSREADY)
    {
      /* We are still single threaded.  In either state of g_cpu_irqlock,
       * the correct return value should always be false.
       */

      return false;
    }

  /* Test if g_cpu_irqlock is locked.  We don't really need to check
   * g_cpu_irqlock to do this, we can use the g_cpu_irqset.
   *
   * Sample the g_cpu_irqset once.  That is an atomic operation.  All
   * subsequent operations will operate on the sampled cpu set.
   */

  irqset = (cpu_set_t)g_cpu_irqset;
  if (irqset == 0)
    {
      /* No CPU holds the lock.  In this case g_cpu_irqlock should be
       * unlocked.  However, if the lock was established in the interrupt
       * handler AND there are no bits set in g_cpu_irqset, that probably
       * means only that the critical section was established from an
       * interrupt handler.  Return false in either case.
       */

      return false;
    }

  /* Some CPU holds the lock.  So g_cpu_irqlock should be locked.
   * Return false if the 'cpu' is the holder of the lock; return
   * true if g_cpu_irqlock is locked, but this CPU is not the
   * holder of the lock.
   */

  return (irqset & (1 << cpu)) == 0;
}
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: restore_critical_section
|
||||
*
|
||||
|
|
|
|||
|
|
@ -223,7 +223,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
|
|||
*/
|
||||
|
||||
me = this_cpu();
|
||||
if ((nxsched_islocked_global() || irq_cpu_locked(me)) &&
|
||||
if ((nxsched_islocked_global()) &&
|
||||
task_state != TSTATE_TASK_ASSIGNED)
|
||||
{
|
||||
/* Add the new ready-to-run task to the g_pendingtasks task list for
|
||||
|
|
|
|||
|
|
@ -190,7 +190,6 @@ bool nxsched_merge_pending(void)
|
|||
FAR struct tcb_s *tcb;
|
||||
bool ret = false;
|
||||
int cpu;
|
||||
int me;
|
||||
|
||||
/* Remove and process every TCB in the g_pendingtasks list.
|
||||
*
|
||||
|
|
@ -198,8 +197,7 @@ bool nxsched_merge_pending(void)
|
|||
* some CPU other than this one is in a critical section.
|
||||
*/
|
||||
|
||||
me = this_cpu();
|
||||
if (!nxsched_islocked_global() && !irq_cpu_locked(me))
|
||||
if (!nxsched_islocked_global())
|
||||
{
|
||||
/* Find the CPU that is executing the lowest priority task */
|
||||
|
||||
|
|
@ -237,7 +235,7 @@ bool nxsched_merge_pending(void)
|
|||
* Check if that happened.
|
||||
*/
|
||||
|
||||
if (nxsched_islocked_global() || irq_cpu_locked(me))
|
||||
if (nxsched_islocked_global())
|
||||
{
|
||||
/* Yes.. then we may have incorrectly placed some TCBs in the
|
||||
* g_readytorun list (unlikely, but possible). We will have to
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb)
|
|||
* then use the 'nxttcb' which will probably be the IDLE thread.
|
||||
*/
|
||||
|
||||
if (!nxsched_islocked_global() && !irq_cpu_locked(this_cpu()))
|
||||
if (!nxsched_islocked_global())
|
||||
{
|
||||
/* Search for the highest priority task that can run on tcb->cpu. */
|
||||
|
||||
|
|
|
|||
|
|
@ -137,7 +137,7 @@ int sched_unlock(void)
|
|||
* BEFORE it clears IRQ lock.
|
||||
*/
|
||||
|
||||
if (!nxsched_islocked_global() && !irq_cpu_locked(cpu) &&
|
||||
if (!nxsched_islocked_global() &&
|
||||
list_pendingtasks()->head != NULL)
|
||||
{
|
||||
if (nxsched_merge_pending())
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue