spinlock: add spin_lock_irqsave_nopreempt/spin_unlock_irqrestore_nopreempt
reason: By using these interfaces, the following usage pattern can be replaced:

    flags = spin_lock_irqsave(lock);
    sched_lock();
    .....
    spin_unlock_irqrestore(lock, flags);
    sched_unlock();

Signed-off-by: hujun5 <hujun5@xiaomi.com>
parent 82e17623b1
commit 907d77a056

1 changed file with 65 additions and 0 deletions
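For comparison, a minimal sketch of the same critical section written with the new helper pair (the lock name g_some_lock is a hypothetical placeholder, not part of the commit):

    flags = spin_lock_irqsave_nopreempt(&g_some_lock);
    /* ... critical section ... */
    spin_unlock_irqrestore_nopreempt(&g_some_lock, flags);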
@@ -499,6 +499,39 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
 #  define spin_lock_irqsave(l) ((void)(l), up_irq_save())
 #endif
 
+/****************************************************************************
+ * Name: spin_lock_irqsave_nopreempt
+ *
+ * Description:
+ *   If SMP is enabled:
+ *     Disable local interrupts, take the spinlock, disable preemption
+ *     (sched_lock()), and return the interrupt state.
+ *
+ *     NOTE: This API is a very simple way to protect data (e.g. an H/W
+ *     register or an internal data structure) in SMP mode.  But do not
+ *     use it with kernel APIs which may suspend the caller (nxsem_wait).
+ *
+ *   If SMP is not enabled:
+ *     This function is equivalent to up_irq_save() + sched_lock().
+ *
+ * Input Parameters:
+ *   lock - Caller specific spinlock.  Not NULL.
+ *
+ * Returned Value:
+ *   An opaque, architecture-specific value that represents the state of
+ *   the interrupts prior to the call to spin_lock_irqsave_nopreempt(lock);
+ *
+ ****************************************************************************/
+
+static inline_function
+irqstate_t spin_lock_irqsave_nopreempt(FAR volatile spinlock_t *lock)
+{
+  irqstate_t flags;
+  flags = spin_lock_irqsave(lock);
+  sched_lock();
+  return flags;
+}
+
 /****************************************************************************
  * Name: spin_trylock_irqsave_notrace
  *
@@ -633,6 +666,38 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, irqstate_t flags)
 #  define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
 #endif
 
+/****************************************************************************
+ * Name: spin_unlock_irqrestore_nopreempt
+ *
+ * Description:
+ *   If SMP is enabled:
+ *     Release the spinlock, restore the interrupt state as it was prior
+ *     to the matching call to spin_lock_irqsave_nopreempt(lock), and
+ *     re-enable preemption (sched_unlock()).
+ *
+ *   If SMP is not enabled:
+ *     This function is equivalent to up_irq_restore() + sched_unlock().
+ *
+ * Input Parameters:
+ *   lock  - Caller specific spinlock.  Not NULL.
+ *
+ *   flags - The architecture-specific value that represents the state of
+ *           the interrupts prior to the call to
+ *           spin_lock_irqsave_nopreempt(lock);
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+static inline_function
+void spin_unlock_irqrestore_nopreempt(FAR volatile spinlock_t *lock,
+                                      irqstate_t flags)
+{
+  spin_unlock_irqrestore(lock, flags);
+  sched_unlock();
+}
+
 #if defined(CONFIG_RW_SPINLOCK)
 
 /****************************************************************************
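As a usage illustration (a minimal sketch under assumptions: g_count_lock, g_event_count, and event_count_increment are hypothetical names that do not appear in the commit), a counter shared between interrupt and thread context could be guarded with the new pair:

    #include <nuttx/spinlock.h>

    static spinlock_t g_count_lock = SP_UNLOCKED;
    static unsigned long g_event_count;

    void event_count_increment(void)
    {
      irqstate_t flags;

      /* Disables local interrupts, takes the spinlock, then sched_lock() */

      flags = spin_lock_irqsave_nopreempt(&g_count_lock);
      g_event_count++;

      /* Releases the spinlock and restores interrupts before sched_unlock() */

      spin_unlock_irqrestore_nopreempt(&g_count_lock, flags);
    }

Note the release order implied by the implementation above: the spinlock is dropped and the interrupt state restored before sched_unlock() runs, so if sched_unlock() allows a pending context switch to occur, the lock is already free.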