sched/wqueue: Improve performance of the work_queue.

This commit improves the performance of the work_queue by reducing
unnecessary wdog timer restarts.

Signed-off-by: ouyangxiangzhen <ouyangxiangzhen@xiaomi.com>
Author: ouyangxiangzhen, 2025-05-08 11:24:46 +08:00 (committed by archer)
Parent: f442a41102
Commit: 36a4d5feaf
2 changed files with 22 additions and 24 deletions
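
The optimization is easiest to see outside the diff: the wdog only needs to
be (re)armed when the queued work becomes the new earliest entry of the
sorted pending list. Below is a minimal sketch of that idea only, with
simplified types and a hypothetical timer_arm() standing in for the real
wd_start_abstick()/work_timer_reset() calls; it is not the NuttX
implementation.

/* Sketch only: re-arm the timer only when the queue head changes. */

#include <stddef.h>
#include <time.h>

struct work
{
  struct work *next;     /* Singly linked, sorted by qtime */
  clock_t      qtime;    /* Absolute expiry tick */
};

struct wqueue
{
  struct work *pending;  /* Head = earliest expiry */
};

/* Hypothetical stand-in for programming the wdog timer. */

static void timer_arm(struct wqueue *wq, clock_t tick)
{
  (void)wq;
  (void)tick;            /* Real code would start the wdog here */
}

static void queue_sorted(struct wqueue *wq, struct work *w)
{
  struct work **pp = &wq->pending;

  /* Insert in expiry order. */

  while (*pp != NULL && (*pp)->qtime <= w->qtime)
    {
      pp = &(*pp)->next;
    }

  w->next = *pp;
  *pp     = w;

  if (wq->pending == w)
    {
      /* The new work expires first, so the timer must be re-armed.
       * In every other case the already-armed timer still fires at
       * the correct (earlier) time, so the restart can be skipped.
       */

      timer_arm(wq, w->qtime);
    }
}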


@@ -90,16 +90,6 @@ static int work_qqueue(FAR struct usr_wqueue_s *wqueue,
work->arg = arg; /* Callback argument */
work->qtime = clock() + delay; /* Delay until work performed */
/* delay+1 is to prevent the insufficient sleep time if we are
* currently near the boundary to the next tick.
* | current_tick | current_tick + 1 | current_tick + 2 | .... |
* | ^ Here we get the current tick
* In this case we delay 1 tick, timer will be triggered at
* current_tick + 1, which is not enough for at least 1 tick.
*/
work->qtime += 1;
/* Insert the work into the wait queue sorted by the expired time. */
head = list_first_entry(&wqueue->q, struct work_s, node);
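
The tick-boundary reasoning in the comment above can be shown as a
standalone helper. This is an illustrative sketch only; the names
wq_expiry and now are hypothetical and not part of the patch.

/* Illustrative only: why one extra tick is added to the delay. */

#include <time.h>

static clock_t wq_expiry(clock_t now, clock_t delay)
{
  /* "now" may have been sampled anywhere inside the current tick, so
   * the first tick of the requested delay can be arbitrarily short.
   * Rounding up by one tick guarantees the work waits at least
   * "delay" full ticks before the timer fires.
   */

  return now + delay + 1;
}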


@@ -80,19 +80,14 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue,
FAR void *arg, clock_t delay, clock_t period)
{
irqstate_t flags;
clock_t expected;
bool wake = false;
int ret = OK;
clock_t expected;
bool retimer;
if (wqueue == NULL || work == NULL || worker == NULL)
{
return -EINVAL;
}
/* Ensure the work has been canceled. */
work_cancel_wq(wqueue, work);
/* delay+1 is to prevent the insufficient sleep time if we are
* currently near the boundary to the next tick.
* | current_tick | current_tick + 1 | current_tick + 2 | .... |
@@ -109,6 +104,10 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue,
flags = spin_lock_irqsave(&wqueue->lock);
/* Ensure the work has been removed. */
retimer = work_available(work) ? false : work_remove(wqueue, work);
/* Initialize the work structure. */
work->worker = worker; /* Work callback. non-NULL means queued */
@@ -116,32 +115,41 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue,
work->qtime = expected; /* Expected time */
work->period = period; /* Periodical delay */
/* Insert to the pending list of the wqueue. */
if (delay)
{
/* Insert to the pending list of the wqueue. */
if (work_insert_pending(wqueue, work))
{
/* Start the timer if the work is the earliest expired work. */
ret = wd_start_abstick(&wqueue->timer, work->qtime,
work_timer_expired, (wdparm_t)wqueue);
retimer = false;
wd_start_abstick(&wqueue->timer, work->qtime,
work_timer_expired, (wdparm_t)wqueue);
}
}
else
{
/* Insert to the expired list of the wqueue. */
list_add_tail(&wqueue->expired, &work->node);
wake = true;
}
if (retimer)
{
work_timer_reset(wqueue);
}
spin_unlock_irqrestore(&wqueue->lock, flags);
if (wake)
if (!delay)
{
/* Immediately wake up the worker thread. */
nxsem_post(&wqueue->sem);
}
return ret;
return 0;
}
int work_queue_period(int qid, FAR struct work_s *work, worker_t worker,
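
For completeness, a hedged usage sketch of the periodic interface. It
assumes work_queue_period() takes the same trailing parameters as
work_queue_period_wq() above (arg, delay, period); HPWORK and SEC2TICK()
are the usual NuttX definitions, and sensor_poll/g_poll_work are made up
for the example.

/* Usage sketch (assumed signature): run sensor_poll() on the
 * high-priority work queue after one second, then every second.
 */

#include <nuttx/config.h>
#include <nuttx/clock.h>
#include <nuttx/wqueue.h>

static struct work_s g_poll_work;

static void sensor_poll(FAR void *arg)
{
  /* Executes on the worker thread once per period. */
}

static int start_polling(void)
{
  return work_queue_period(HPWORK, &g_poll_work, sensor_poll, NULL,
                           SEC2TICK(1), SEC2TICK(1));
}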