sched: remove csection in event
We replace the large critical section (global lock) with a small per-event spinlock to improve concurrency performance.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in:
parent
3629a3b5a1
commit
4cea148088
4 changed files with 18 additions and 33 deletions
|
|
@ -31,6 +31,7 @@
|
|||
|
||||
#include <nuttx/list.h>
|
||||
#include <nuttx/semaphore.h>
|
||||
#include <nuttx/spinlock.h>
|
||||
|
||||
/****************************************************************************
|
||||
* Pre-processor Definitions
|
||||
|
|
@ -70,8 +71,9 @@ struct nxevent_wait_s
|
|||
|
||||
struct nxevent_s
|
||||
{
|
||||
struct list_node list; /* Waiting list of nxevent_wait_t */
|
||||
volatile nxevent_mask_t events; /* Pending Events */
|
||||
struct list_node list; /* Waiting list of nxevent_wait_t */
|
||||
volatile nxevent_mask_t events; /* Pending Events */
|
||||
spinlock_t lock; /* Spinlock */
|
||||
};
|
||||
|
||||
#ifdef CONFIG_FS_NAMED_EVENTS
|
||||
|
|
|
|||
|
|
@ -49,5 +49,6 @@
|
|||
/****************************************************************************
 * Name: nxevent_init
 *
 * Description:
 *   Initialize an event object: set up the empty waiter list, the
 *   protecting spinlock, and the initially pending event mask.
 *
 * Input Parameters:
 *   event  - The event object to initialize
 *   events - Event bits that are pending from the start
 *
 ****************************************************************************/

void nxevent_init(FAR nxevent_t *event, nxevent_mask_t events)
{
  list_initialize(&event->list);
  spin_lock_init(&event->lock);
  event->events = events;
}
|
||||
|
|
|
|||
|
|
@ -32,23 +32,6 @@
|
|||
* Private Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: nxevent_sem_post
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
 * Name: nxevent_sem_post
 *
 * Description:
 *   Post the waiter's semaphore, but only if its count is still below one,
 *   so a waiter that was already released is not posted a second time.
 *
 * Input Parameters:
 *   sem - The semaphore to (conditionally) post
 *
 * Returned Value:
 *   Zero on success (including the no-op case); a negated errno value
 *   from nxsem_post() on failure.
 *
 ****************************************************************************/

static inline_function int nxevent_sem_post(FAR sem_t *sem)
{
  int count;

  nxsem_get_value(sem, &count);
  if (count >= 1)
    {
      /* Already signaled -- nothing to do */

      return 0;
    }

  return nxsem_post(sem);
}
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
|
@ -98,7 +81,7 @@ int nxevent_post(FAR nxevent_t *event, nxevent_mask_t events,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
flags = enter_critical_section();
|
||||
flags = spin_lock_irqsave_nopreempt(&event->lock);
|
||||
|
||||
if ((eflags & NXEVENT_POST_SET) != 0)
|
||||
{
|
||||
|
|
@ -113,12 +96,6 @@ int nxevent_post(FAR nxevent_t *event, nxevent_mask_t events,
|
|||
{
|
||||
postall = ((eflags & NXEVENT_POST_ALL) != 0);
|
||||
|
||||
/* Hold schedule lock here to avoid context switch if post high
|
||||
* priority task.
|
||||
*/
|
||||
|
||||
sched_lock();
|
||||
|
||||
list_for_every_entry_safe(&event->list, wait, tmp,
|
||||
nxevent_wait_t, node)
|
||||
{
|
||||
|
|
@ -129,7 +106,7 @@ int nxevent_post(FAR nxevent_t *event, nxevent_mask_t events,
|
|||
{
|
||||
list_delete_init(&wait->node);
|
||||
|
||||
ret = nxevent_sem_post(&wait->sem);
|
||||
ret = nxsem_post(&wait->sem);
|
||||
if (ret < 0)
|
||||
{
|
||||
continue;
|
||||
|
|
@ -156,11 +133,9 @@ int nxevent_post(FAR nxevent_t *event, nxevent_mask_t events,
|
|||
{
|
||||
event->events &= ~clear;
|
||||
}
|
||||
|
||||
sched_unlock();
|
||||
}
|
||||
|
||||
leave_critical_section(flags);
|
||||
spin_unlock_irqrestore_nopreempt(&event->lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ nxevent_mask_t nxevent_tickwait_wait(FAR nxevent_t *event,
|
|||
events = ~0;
|
||||
}
|
||||
|
||||
flags = enter_critical_section();
|
||||
flags = spin_lock_irqsave(&event->lock);
|
||||
|
||||
if ((eflags & NXEVENT_WAIT_RESET) != 0)
|
||||
{
|
||||
|
|
@ -126,6 +126,7 @@ nxevent_mask_t nxevent_tickwait_wait(FAR nxevent_t *event,
|
|||
wait->eflags = eflags;
|
||||
|
||||
list_add_tail(&event->list, &(wait->node));
|
||||
spin_unlock_irqrestore(&event->lock, flags);
|
||||
|
||||
/* Wait for the event */
|
||||
|
||||
|
|
@ -141,19 +142,25 @@ nxevent_mask_t nxevent_tickwait_wait(FAR nxevent_t *event,
|
|||
/* Destroy local variables */
|
||||
|
||||
nxsem_destroy(&(wait->sem));
|
||||
list_delete(&(wait->node));
|
||||
|
||||
flags = spin_lock_irqsave(&event->lock);
|
||||
if (ret == 0)
|
||||
{
|
||||
events = wait->expect;
|
||||
DEBUGASSERT(!list_in_list(&(wait->node)));
|
||||
}
|
||||
else
|
||||
{
|
||||
if (list_in_list(&(wait->node)))
|
||||
{
|
||||
list_delete(&(wait->node));
|
||||
}
|
||||
|
||||
events = 0;
|
||||
}
|
||||
}
|
||||
|
||||
leave_critical_section(flags);
|
||||
spin_unlock_irqrestore(&event->lock, flags);
|
||||
|
||||
return events;
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue