From b4def16ac371ec2f90b63861f78895bb17318cd8 Mon Sep 17 00:00:00 2001
From: liguiding1
Date: Wed, 13 Dec 2023 17:24:07 +0800
Subject: [PATCH] Revert "mm/iob: Replace the critical section with spin lock"

`g_iob_sem.semcount` is modified both directly in the iob source code
and through the nxsem_xxx APIs.  The nxsem APIs use a critical section
to ensure the semaphore count is updated correctly.  If the iob code
modifies the count under a spin lock at the same time, the two update
paths do not exclude each other, which is not safe.  This PR reverts
the spin lock change and uses a critical section to align with what
nxsem uses.
---
 mm/iob/iob.h                   |  3 ---
 mm/iob/iob_add_queue.c         |  4 ++--
 mm/iob/iob_alloc.c             | 10 +++++-----
 mm/iob/iob_alloc_qentry.c      |  8 ++++----
 mm/iob/iob_free.c              | 10 +++++-----
 mm/iob/iob_free_qentry.c       |  5 ++---
 mm/iob/iob_free_queue_qentry.c |  8 +++-----
 mm/iob/iob_initialize.c        |  2 --
 mm/iob/iob_remove_queue.c      |  9 ++-------
 9 files changed, 23 insertions(+), 36 deletions(-)

diff --git a/mm/iob/iob.h b/mm/iob/iob.h
index aea1198304..d5fe93dc91 100644
--- a/mm/iob/iob.h
+++ b/mm/iob/iob.h
@@ -32,7 +32,6 @@
 #include
 
 #include
-#include <nuttx/spinlock.h>
 #include
 
 #ifdef CONFIG_MM_IOB
@@ -85,8 +84,6 @@ extern sem_t g_throttle_sem; /* Counts available I/O buffers when throttled */
 extern sem_t g_qentry_sem;   /* Counts free I/O buffer queue containers */
 #endif
 
-extern spinlock_t g_iob_lock;
-
 /****************************************************************************
  * Public Function Prototypes
  ****************************************************************************/
diff --git a/mm/iob/iob_add_queue.c b/mm/iob/iob_add_queue.c
index f0ccb81841..38a641d023 100644
--- a/mm/iob/iob_add_queue.c
+++ b/mm/iob/iob_add_queue.c
@@ -63,7 +63,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
 
   qentry->qe_flink = NULL;
 
-  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
+  irqstate_t flags = enter_critical_section();
   if (!iobq->qh_head)
     {
       iobq->qh_head = qentry;
@@ -76,7 +76,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
       iobq->qh_tail = qentry;
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  leave_critical_section(flags);
 
   return 0;
 }
diff --git a/mm/iob/iob_alloc.c b/mm/iob/iob_alloc.c
index f1bc346123..42cafee97c 100644
--- a/mm/iob/iob_alloc.c
+++ b/mm/iob/iob_alloc.c
@@ -78,7 +78,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
    * to protect the committed list: We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = enter_critical_section();
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -97,7 +97,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  leave_critical_section(flags);
   return iob;
 }
 
@@ -264,7 +264,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
    * to protect the free list: We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = enter_critical_section();
 
 #if CONFIG_IOB_THROTTLE > 0
   /* If there are free I/O buffers for this allocation */
@@ -308,7 +308,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
         }
 #endif
 
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      leave_critical_section(flags);
 
       /* Put the I/O buffer in a known state */
 
@@ -320,7 +320,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
         }
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  leave_critical_section(flags);
   return NULL;
 }
diff --git a/mm/iob/iob_alloc_qentry.c b/mm/iob/iob_alloc_qentry.c
index 26ee1d827f..82c8393c44 100644
--- a/mm/iob/iob_alloc_qentry.c
+++ b/mm/iob/iob_alloc_qentry.c
@@ -59,7 +59,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
    * to protect the committed list: We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = enter_critical_section();
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -75,7 +75,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  leave_critical_section(flags);
   return iobq;
 }
 
@@ -201,7 +201,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
    * to protect the free list: We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = enter_critical_section();
   iobq = g_iob_freeqlist;
   if (iobq)
     {
@@ -227,7 +227,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  leave_critical_section(flags);
   return iobq;
 }
diff --git a/mm/iob/iob_free.c b/mm/iob/iob_free.c
index 049e4c29ae..cec01c8295 100644
--- a/mm/iob/iob_free.c
+++ b/mm/iob/iob_free.c
@@ -135,7 +135,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
    * interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = enter_critical_section();
 
   /* Which list?  If there is a task waiting for an IOB, then put
    * the IOB on either the free list or on the committed list where
@@ -168,7 +168,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       g_iob_freelist = iob;
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  leave_critical_section(flags);
 
   /* Signal that an IOB is available.  This is done with schedule locked
    * to make sure that both g_iob_sem and g_throttle_sem are incremented
@@ -184,7 +184,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
   DEBUGASSERT(g_iob_sem.semcount <= CONFIG_IOB_NBUFFERS);
 
 #if CONFIG_IOB_THROTTLE > 0
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = enter_critical_section();
 
   if (g_iob_sem.semcount > CONFIG_IOB_THROTTLE)
     {
@@ -205,7 +205,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
           g_iob_sem.semcount--;
         }
 
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      leave_critical_section(flags);
 
       nxsem_post(&g_throttle_sem);
       DEBUGASSERT(g_throttle_sem.semcount <=
@@ -213,7 +213,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
     }
   else
     {
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      leave_critical_section(flags);
     }
 #endif
 
diff --git a/mm/iob/iob_free_qentry.c b/mm/iob/iob_free_qentry.c
index 992aba1787..f40e35d8b3 100644
--- a/mm/iob/iob_free_qentry.c
+++ b/mm/iob/iob_free_qentry.c
@@ -60,7 +60,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    * interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = enter_critical_section();
 
   /* Which list?  If there is a task waiting for an IOB chain, then put
    * the IOB chain on either the free list or on the committed list where
@@ -79,8 +79,6 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
       g_iob_freeqlist = iobq;
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
-
   /* Signal that an I/O buffer chain container is available.  If there
    * is a thread waiting for an I/O buffer chain container, this will
    * wake up exactly one thread.  The semaphore count will correctly
@@ -89,6 +87,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    */
 
   nxsem_post(&g_qentry_sem);
+  leave_critical_section(flags);
 
   /* And return the I/O buffer chain container after the one that was
    * freed */
diff --git a/mm/iob/iob_free_queue_qentry.c b/mm/iob/iob_free_queue_qentry.c
index 5378efd574..315b23b116 100644
--- a/mm/iob/iob_free_queue_qentry.c
+++ b/mm/iob/iob_free_queue_qentry.c
@@ -53,7 +53,7 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
   FAR struct iob_qentry_s *prev = NULL;
   FAR struct iob_qentry_s *qentry;
 
-  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
+  irqstate_t flags = enter_critical_section();
   for (qentry = iobq->qh_head; qentry != NULL;
        prev = qentry, qentry = qentry->qe_flink)
     {
@@ -75,8 +75,6 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
              iobq->qh_tail = prev;
            }
 
-          spin_unlock_irqrestore(&g_iob_lock, flags);
-
          /* Remove the queue container */
 
          iob_free_qentry(qentry);
@@ -85,11 +83,11 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
 
          iob_free_chain(iob);
 
-          return;
+          break;
        }
    }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  leave_critical_section(flags);
 }
 
 #endif /* CONFIG_IOB_NCHAINS > 0 */
diff --git a/mm/iob/iob_initialize.c b/mm/iob/iob_initialize.c
index 0093ffe7ac..b37aef98c5 100644
--- a/mm/iob/iob_initialize.c
+++ b/mm/iob/iob_initialize.c
@@ -108,8 +108,6 @@ sem_t g_throttle_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS -
 sem_t g_qentry_sem = SEM_INITIALIZER(CONFIG_IOB_NCHAINS);
 #endif
 
-spinlock_t g_iob_lock = SP_UNLOCKED;
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
diff --git a/mm/iob/iob_remove_queue.c b/mm/iob/iob_remove_queue.c
index bc3b506384..25a8679cbf 100644
--- a/mm/iob/iob_remove_queue.c
+++ b/mm/iob/iob_remove_queue.c
@@ -58,7 +58,7 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
 
   /* Remove the I/O buffer chain from the head of the queue */
 
-  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
+  irqstate_t flags = enter_critical_section();
   qentry = iobq->qh_head;
   if (qentry)
     {
@@ -68,8 +68,6 @@
          iobq->qh_tail = NULL;
        }
 
-      spin_unlock_irqrestore(&g_iob_lock, flags);
-
      /* Extract the I/O buffer chain from the container and free the
       * container.
       */
@@ -77,11 +75,8 @@
 
      iob = qentry->qe_head;
      iob_free_qentry(qentry);
    }
-  else
-    {
-      spin_unlock_irqrestore(&g_iob_lock, flags);
-    }
 
+  leave_critical_section(flags);
  return iob;
 }
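
Reviewer note (not part of the patch): the race described in the commit
message can be illustrated with the sketch below. It is hypothetical and
simplified, assuming NuttX SMP semantics; the helper names
old_iob_free_path() and concurrent_nxsem_post_path() do not exist in the
tree, and the second path is only a simplified view of what nxsem_post()
does internally when no thread is waiting on the semaphore.

#include <nuttx/irq.h>
#include <nuttx/semaphore.h>
#include <nuttx/spinlock.h>

extern sem_t g_iob_sem;
extern spinlock_t g_iob_lock;    /* Existed before this revert */

/* CPU 0: the pre-revert iob code updated semcount under the spin lock */

static void old_iob_free_path(void)
{
  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
  g_iob_sem.semcount--;          /* Read-modify-write of the count */
  spin_unlock_irqrestore(&g_iob_lock, flags);
}

/* CPU 1: nxsem_post(&g_iob_sem) updates the same count inside a critical
 * section, which does not take g_iob_lock (simplified no-waiter case).
 */

static void concurrent_nxsem_post_path(void)
{
  irqstate_t flags = enter_critical_section();
  g_iob_sem.semcount++;          /* Concurrent read-modify-write */
  leave_critical_section(flags);
}

/* On SMP the spin lock and the critical section do not exclude each other,
 * so both CPUs can read the same old semcount and one update is lost.
 * After this revert the iob code also uses enter_critical_section(), so
 * both updates are serialized by the same lock, matching nxsem.
 */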