Revert "mm/iob: Replace the critical section with spin lock"

`g_iob_sem.semcount` is modified both manually in the iob source code and
by the nxsem_xxx APIs.

The nxsem-related APIs use a critical section to ensure the semaphore
count is updated safely. If iob modifies the count under a spin lock at
the same time, the access is not safe.

This PR reverts the spin lock change and uses a critical section, to
align with what nxsem uses.
Author:    liguiding1
Date:      2023-12-13 17:24:07 +08:00
Committer: Xiang Xiao
Parent:    1280ac845a
Commit:    b4def16ac3

9 changed files with 23 additions and 36 deletions
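
As background, a minimal hand-written sketch of the problem the message describes. It is illustrative only (the `g_example_sem` semaphore and `example_adjust_and_post()` function are invented for this note, not part of the commit) and shows why a manual `semcount` adjustment must share the critical section that the nxsem_xxx functions use internally, rather than a separate spin lock:

/* Illustrative sketch only -- not code from mm/iob.  The names below are
 * invented; the point is the locking pattern, not the iob logic itself.
 */

#include <nuttx/config.h>
#include <nuttx/irq.h>
#include <nuttx/semaphore.h>

static sem_t g_example_sem = SEM_INITIALIZER(1);

static void example_adjust_and_post(void)
{
  irqstate_t flags;

  /* nxsem_wait()/nxsem_post() read-modify-write sem->semcount inside
   * enter_critical_section()/leave_critical_section(), so a manual
   * adjustment of the count must use the same primitive.
   */

  flags = enter_critical_section();
  g_example_sem.semcount--;      /* manual count fix-up, as iob does */
  leave_critical_section(flags);

  /* Guarding the decrement with a driver-private spin lock instead (as
   * the reverted change did) does not serialize it against the nxsem
   * paths, because nxsem_xxx never takes that spin lock -- on SMP the
   * two writers can then race on semcount.
   */

  nxsem_post(&g_example_sem);
}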


@@ -32,7 +32,6 @@
#include <debug.h>
#include <nuttx/mm/iob.h>
-#include <nuttx/spinlock.h>
#include <nuttx/semaphore.h>
#ifdef CONFIG_MM_IOB
@@ -85,8 +84,6 @@ extern sem_t g_throttle_sem; /* Counts available I/O buffers when throttled */
extern sem_t g_qentry_sem; /* Counts free I/O buffer queue containers */
#endif
-extern spinlock_t g_iob_lock;
/****************************************************************************
* Public Function Prototypes
****************************************************************************/


@@ -63,7 +63,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
qentry->qe_flink = NULL;
-irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
+irqstate_t flags = enter_critical_section();
if (!iobq->qh_head)
{
iobq->qh_head = qentry;
@@ -76,7 +76,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
iobq->qh_tail = qentry;
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
return 0;
}


@@ -78,7 +78,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
* to protect the committed list: We disable interrupts very briefly.
*/
-flags = spin_lock_irqsave(&g_iob_lock);
+flags = enter_critical_section();
/* Take the I/O buffer from the head of the committed list */
@@ -97,7 +97,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
iob->io_pktlen = 0; /* Total length of the packet */
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
return iob;
}
@@ -264,7 +264,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
* to protect the free list: We disable interrupts very briefly.
*/
-flags = spin_lock_irqsave(&g_iob_lock);
+flags = enter_critical_section();
#if CONFIG_IOB_THROTTLE > 0
/* If there are free I/O buffers for this allocation */
@@ -308,7 +308,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
}
#endif
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
/* Put the I/O buffer in a known state */
@@ -320,7 +320,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
}
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
return NULL;
}


@@ -59,7 +59,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
* to protect the committed list: We disable interrupts very briefly.
*/
-flags = spin_lock_irqsave(&g_iob_lock);
+flags = enter_critical_section();
/* Take the I/O buffer from the head of the committed list */
@@ -75,7 +75,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
iobq->qe_head = NULL; /* Nothing is contained */
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
return iobq;
}
@@ -201,7 +201,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
* to protect the free list: We disable interrupts very briefly.
*/
-flags = spin_lock_irqsave(&g_iob_lock);
+flags = enter_critical_section();
iobq = g_iob_freeqlist;
if (iobq)
{
@@ -227,7 +227,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
iobq->qe_head = NULL; /* Nothing is contained */
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
return iobq;
}


@@ -135,7 +135,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
* interrupts very briefly.
*/
-flags = spin_lock_irqsave(&g_iob_lock);
+flags = enter_critical_section();
/* Which list? If there is a task waiting for an IOB, then put
* the IOB on either the free list or on the committed list where
@@ -168,7 +168,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
g_iob_freelist = iob;
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
/* Signal that an IOB is available. This is done with schedule locked
* to make sure that both g_iob_sem and g_throttle_sem are incremented
@@ -184,7 +184,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
DEBUGASSERT(g_iob_sem.semcount <= CONFIG_IOB_NBUFFERS);
#if CONFIG_IOB_THROTTLE > 0
-flags = spin_lock_irqsave(&g_iob_lock);
+flags = enter_critical_section();
if (g_iob_sem.semcount > CONFIG_IOB_THROTTLE)
{
@@ -205,7 +205,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
g_iob_sem.semcount--;
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
nxsem_post(&g_throttle_sem);
DEBUGASSERT(g_throttle_sem.semcount <=
@@ -213,7 +213,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
}
else
{
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
}
#endif


@@ -60,7 +60,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
* interrupts very briefly.
*/
-flags = spin_lock_irqsave(&g_iob_lock);
+flags = enter_critical_section();
/* Which list? If there is a task waiting for an IOB chain, then put
* the IOB chain on either the free list or on the committed list where
@@ -79,8 +79,6 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
g_iob_freeqlist = iobq;
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
/* Signal that an I/O buffer chain container is available. If there
* is a thread waiting for an I/O buffer chain container, this will
* wake up exactly one thread. The semaphore count will correctly
@@ -89,6 +87,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
*/
nxsem_post(&g_qentry_sem);
+leave_critical_section(flags);
/* And return the I/O buffer chain container after the one that was freed */


@@ -53,7 +53,7 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
FAR struct iob_qentry_s *prev = NULL;
FAR struct iob_qentry_s *qentry;
-irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
+irqstate_t flags = enter_critical_section();
for (qentry = iobq->qh_head; qentry != NULL;
prev = qentry, qentry = qentry->qe_flink)
{
@@ -75,8 +75,6 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
iobq->qh_tail = prev;
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
/* Remove the queue container */
iob_free_qentry(qentry);
@@ -85,11 +83,11 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
iob_free_chain(iob);
-return;
+break;
}
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
+leave_critical_section(flags);
}
#endif /* CONFIG_IOB_NCHAINS > 0 */


@@ -108,8 +108,6 @@ sem_t g_throttle_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS -
sem_t g_qentry_sem = SEM_INITIALIZER(CONFIG_IOB_NCHAINS);
#endif
-spinlock_t g_iob_lock = SP_UNLOCKED;
/****************************************************************************
* Public Functions
****************************************************************************/


@@ -58,7 +58,7 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
/* Remove the I/O buffer chain from the head of the queue */
-irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
+irqstate_t flags = enter_critical_section();
qentry = iobq->qh_head;
if (qentry)
{
@@ -68,8 +68,6 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
iobq->qh_tail = NULL;
}
-spin_unlock_irqrestore(&g_iob_lock, flags);
/* Extract the I/O buffer chain from the container and free the
* container.
*/
@@ -77,11 +75,8 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
iob = qentry->qe_head;
iob_free_qentry(qentry);
}
-else
-{
-spin_unlock_irqrestore(&g_iob_lock, flags);
-}
+leave_critical_section(flags);
return iob;
}