fs/spinlock: switch to the raw (no sched_lock()) spinlock variant to improve performance

After the change below was merged into the kernel, spin_lock_irqsave() disables
preemption (via sched_lock()) by default, but this behavior is not appropriate
for every scenario. The places in the kernel that use spin_lock() extensively
only need short critical sections and never trigger scheduling, so the extra
sched_lock()/sched_unlock() on every lock/unlock cycle causes a serious
performance degradation of NuttX in AMP mode.

In this PR I try to expose similar problems, and I hope each subsystem will
carefully check its own spinlock call sites in the same way.
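
As an illustration, here is a minimal sketch of the difference between the two
variants after the change quoted below. This is a simplified sketch of the
idea, not the literal NuttX implementation; the function bodies are
assumptions, only the names follow the real API:

irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
{
  irqstate_t flags = up_irq_save(); /* mask local interrupts */

  sched_lock();                     /* new: also disables preemption */
  spin_lock(lock);
  return flags;
}

irqstate_t raw_spin_lock_irqsave(FAR volatile spinlock_t *lock)
{
  irqstate_t flags = up_irq_save(); /* mask local interrupts only */

  spin_lock(lock);                  /* no sched_lock()/sched_unlock() pair */
  return flags;
}

For a short critical section that cannot reschedule, the raw variant is
sufficient and avoids the sched_lock()/sched_unlock() overhead on every
lock/unlock cycle.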

https://github.com/apache/nuttx/pull/14578
|commit b69111d16a
|Author: hujun5 <hujun5@xiaomi.com>
|Date:   Thu Jan 23 16:14:18 2025 +0800
|
|    spinlock: add sched_lock to spin_lock_irqsave
|
|    reason:
|    We aim to replace big locks with smaller ones. So we will use spin_lock_irqsave extensively to
|    replace enter_critical_section in the subsequent process. We imitate the implementation of Linux
|    by adding sched_lock to spin_lock_irqsave in order to address scenarios where sem_post occurs
|    within spin_lock_irqsave, which can lead to spinlock failures and deadlocks.
|
|    Signed-off-by: hujun5 <hujun5@xiaomi.com>
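
To make the quoted rationale concrete: the dangerous pattern is a wakeup
inside the critical section. A hedged sketch (g_lock, g_sem and
low_prio_poster are hypothetical names; the behavior shown assumes the
pre-change spin_lock_irqsave(), i.e. what raw_spin_lock_irqsave() does today):

static spinlock_t g_lock = SP_UNLOCKED;
static sem_t g_sem;

static void low_prio_poster(void)
{
  irqstate_t flags = raw_spin_lock_irqsave(&g_lock);

  /* If sem_post() wakes a higher-priority task, NuttX may switch to it
   * immediately.  If that task then tries to take g_lock, it spins
   * forever, because this task never runs again to release the lock.
   * sched_lock() inside spin_lock_irqsave() defers the switch until
   * spin_unlock_irqrestore(), so the deadlock cannot happen.
   */

  sem_post(&g_sem);

  raw_spin_unlock_irqrestore(&g_lock, flags);
}

This is also why the raw variants used below are only safe where the critical
section is short and cannot trigger a reschedule.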

Signed-off-by: chao an <anchao.archer@bytedance.com>
commit ace7f0c633
parent b9e995b321
Author: chao an
Date:   2025-01-24 09:58:04 +08:00
Committer: Xiang Xiao

@@ -72,9 +72,9 @@ static FAR struct file *files_fget_by_index(FAR struct filelist *list,
   FAR struct file *filep;
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&list->fl_lock);
+  flags = raw_spin_lock_irqsave(&list->fl_lock);
   filep = &list->fl_files[l1][l2];
-  spin_unlock_irqrestore(&list->fl_lock, flags);
+  raw_spin_unlock_irqrestore(&list->fl_lock, flags);
 
 #ifdef CONFIG_FS_REFCOUNT
   if (filep->f_inode != NULL)
@@ -164,7 +164,7 @@ static int files_extend(FAR struct filelist *list, size_t row)
     }
   while (++i < row);
 
-  flags = spin_lock_irqsave(&list->fl_lock);
+  flags = raw_spin_lock_irqsave(&list->fl_lock);
 
   /* To avoid race condition, if the file list is updated by other threads
    * and list rows is greater or equal than temp list,
@@ -173,7 +173,7 @@ static int files_extend(FAR struct filelist *list, size_t row)
 
   if (orig_rows != list->fl_rows && list->fl_rows >= row)
     {
-      spin_unlock_irqrestore(&list->fl_lock, flags);
+      raw_spin_unlock_irqrestore(&list->fl_lock, flags);
 
       for (j = orig_rows; j < i; j++)
         {
@@ -195,7 +195,7 @@ static int files_extend(FAR struct filelist *list, size_t row)
 
   list->fl_files = files;
   list->fl_rows = row;
-  spin_unlock_irqrestore(&list->fl_lock, flags);
+  raw_spin_unlock_irqrestore(&list->fl_lock, flags);
 
   if (tmp != NULL && tmp != &list->fl_prefile)
     {
@@ -565,13 +565,13 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode,
 
   /* Find free file */
 
-  flags = spin_lock_irqsave(&list->fl_lock);
+  flags = raw_spin_lock_irqsave(&list->fl_lock);
 
   for (; ; i++, j = 0)
     {
       if (i >= list->fl_rows)
         {
-          spin_unlock_irqrestore(&list->fl_lock, flags);
+          raw_spin_unlock_irqrestore(&list->fl_lock, flags);
 
           ret = files_extend(list, i + 1);
           if (ret < 0)
@@ -579,7 +579,7 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode,
               return ret;
             }
 
-          flags = spin_lock_irqsave(&list->fl_lock);
+          flags = raw_spin_lock_irqsave(&list->fl_lock);
         }
 
       do
@@ -608,7 +608,7 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode,
     }
 
 found:
-  spin_unlock_irqrestore(&list->fl_lock, flags);
+  raw_spin_unlock_irqrestore(&list->fl_lock, flags);
 
   if (addref)
     {