From ace7f0c63393386e8cee8a75b71de95b12c650b2 Mon Sep 17 00:00:00 2001 From: chao an Date: Fri, 24 Jan 2025 09:58:04 +0800 Subject: [PATCH] fs/spinlock: replace with the no-sched_lock() version to improve performance After the change below was merged into the kernel, spin_lock() turns off preemption by default, but this behavior is not appropriate for all scenarios. The locations in the kernel that use spin_lock() extensively only require short critical sections and do not trigger scheduling, so the added sched_lock() leads to serious performance degradation of NuttX in AMP mode. In this PR, I try to expose similar problems and hope that each subsystem will carefully check its own code coverage. https://github.com/apache/nuttx/pull/14578 |commit b69111d16a2a330fa272af8175c832e08881844b |Author: hujun5 |Date: Thu Jan 23 16:14:18 2025 +0800 | | spinlock: add sched_lock to spin_lock_irqsave | | reason: | We aim to replace big locks with smaller ones. So we will use spin_lock_irqsave extensively to | replace enter_critical_section in the subsequent process. We imitate the implementation of Linux | by adding sched_lock to spin_lock_irqsave in order to address scenarios where sem_post occurs | within spin_lock_irqsave, which can lead to spinlock failures and deadlocks. 
| | Signed-off-by: hujun5 Signed-off-by: chao an --- fs/inode/fs_files.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/inode/fs_files.c b/fs/inode/fs_files.c index 4c2baefab4..878ef6664b 100644 --- a/fs/inode/fs_files.c +++ b/fs/inode/fs_files.c @@ -72,9 +72,9 @@ static FAR struct file *files_fget_by_index(FAR struct filelist *list, FAR struct file *filep; irqstate_t flags; - flags = spin_lock_irqsave(&list->fl_lock); + flags = raw_spin_lock_irqsave(&list->fl_lock); filep = &list->fl_files[l1][l2]; - spin_unlock_irqrestore(&list->fl_lock, flags); + raw_spin_unlock_irqrestore(&list->fl_lock, flags); #ifdef CONFIG_FS_REFCOUNT if (filep->f_inode != NULL) @@ -164,7 +164,7 @@ static int files_extend(FAR struct filelist *list, size_t row) } while (++i < row); - flags = spin_lock_irqsave(&list->fl_lock); + flags = raw_spin_lock_irqsave(&list->fl_lock); /* To avoid race condition, if the file list is updated by other threads * and list rows is greater or equal than temp list, @@ -173,7 +173,7 @@ static int files_extend(FAR struct filelist *list, size_t row) if (orig_rows != list->fl_rows && list->fl_rows >= row) { - spin_unlock_irqrestore(&list->fl_lock, flags); + raw_spin_unlock_irqrestore(&list->fl_lock, flags); for (j = orig_rows; j < i; j++) { @@ -195,7 +195,7 @@ static int files_extend(FAR struct filelist *list, size_t row) list->fl_files = files; list->fl_rows = row; - spin_unlock_irqrestore(&list->fl_lock, flags); + raw_spin_unlock_irqrestore(&list->fl_lock, flags); if (tmp != NULL && tmp != &list->fl_prefile) { @@ -565,13 +565,13 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode, /* Find free file */ - flags = spin_lock_irqsave(&list->fl_lock); + flags = raw_spin_lock_irqsave(&list->fl_lock); for (; ; i++, j = 0) { if (i >= list->fl_rows) { - spin_unlock_irqrestore(&list->fl_lock, flags); + raw_spin_unlock_irqrestore(&list->fl_lock, flags); ret = files_extend(list, i + 1); if (ret < 0) @@ -579,7 
+579,7 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode, return ret; } - flags = spin_lock_irqsave(&list->fl_lock); + flags = raw_spin_lock_irqsave(&list->fl_lock); } do @@ -608,7 +608,7 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode, } found: - spin_unlock_irqrestore(&list->fl_lock, flags); + raw_spin_unlock_irqrestore(&list->fl_lock, flags); if (addref) {