mm/iob: remove csection

reason:
Decouple the semaphore count (semcount) from the business logic by
using an independent counting variable, which allows us to remove
critical sections in many cases.
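
For readers skimming the diff, the pattern is small: previously the IOB code
manipulated sem_t::semcount directly inside enter_critical_section()/
leave_critical_section() pairs; now an independent counter protected by the
new g_iob_lock spinlock carries the bookkeeping, and the semaphores are only
posted/waited on to block and wake tasks. A minimal before/after sketch of
that pattern (illustrative only, not a literal excerpt from the patch):

/* Before: the semaphore's internal count doubled as the free-buffer count
 * and had to be adjusted inside a critical section.
 */

flags = enter_critical_section();
g_iob_sem.semcount--;
leave_critical_section(flags);

/* After: a plain counter guarded by a spinlock carries the bookkeeping;
 * the semaphore is touched only when a task must block or be woken.
 */

flags = spin_lock_irqsave(&g_iob_lock);
g_iob_count--;
spin_unlock_irqrestore(&g_iob_lock, flags);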

Signed-off-by: hujun5 <hujun5@xiaomi.com>
Authored by hujun5 on 2024-11-11 17:13:43 +08:00; committed by Alan C. Assis
parent 2714f1b605
commit 961767b2a4
11 changed files with 230 additions and 236 deletions


@@ -33,6 +33,7 @@
#include <nuttx/mm/iob.h>
#include <nuttx/semaphore.h>
#include <nuttx/spinlock.h>
#ifdef CONFIG_MM_IOB
@@ -76,14 +77,29 @@ extern FAR struct iob_qentry_s *g_iob_qcommitted;
/* Counting semaphores that track the number of free IOBs/qentries */
extern sem_t g_iob_sem; /* Counts free I/O buffers */
extern sem_t g_iob_sem;
/* Counts free I/O buffers */
extern volatile int16_t g_iob_count;
#if CONFIG_IOB_THROTTLE > 0
extern sem_t g_throttle_sem; /* Counts available I/O buffers when throttled */
extern sem_t g_throttle_sem;
/* Counts available I/O buffers when throttled */
extern volatile int16_t g_throttle_count;
#endif
#if CONFIG_IOB_NCHAINS > 0
extern sem_t g_qentry_sem; /* Counts free I/O buffer queue containers */
extern sem_t g_qentry_sem;
/* Counts free I/O buffer queue containers */
extern volatile int16_t g_qentry_count;
#endif
extern volatile spinlock_t g_iob_lock;
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
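
The sign convention these counters follow (inferred from the hunks below, not
stated verbatim in the patch) is worth keeping in mind while reading the rest
of the diff:

/* Inferred convention:
 *
 *   g_iob_count > 0   that many I/O buffers sit on g_iob_freelist
 *   g_iob_count == 0  no free buffers and no waiters
 *   g_iob_count < 0   -g_iob_count tasks are blocked in iob_allocwait();
 *                     a freed buffer is placed on g_iob_committed and
 *                     g_iob_sem is posted to wake exactly one of them
 *
 * g_throttle_count and g_qentry_count follow the same pattern for the
 * throttled and qentry paths. Updates to the counters and to the free/
 * committed lists are always made while holding g_iob_lock.
 */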


@@ -63,7 +63,6 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
qentry->qe_flink = NULL;
irqstate_t flags = enter_critical_section();
if (!iobq->qh_head)
{
iobq->qh_head = qentry;
@@ -76,8 +75,6 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
iobq->qh_tail = qentry;
}
leave_critical_section(flags);
return 0;
}


@@ -78,7 +78,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
* to protect the committed list: We disable interrupts very briefly.
*/
flags = enter_critical_section();
flags = spin_lock_irqsave(&g_iob_lock);
/* Take the I/O buffer from the head of the committed list */
@@ -97,10 +97,82 @@ static FAR struct iob_s *iob_alloc_committed(void)
iob->io_pktlen = 0; /* Total length of the packet */
}
leave_critical_section(flags);
spin_unlock_irqrestore(&g_iob_lock, flags);
return iob;
}
static FAR struct iob_s *iob_tryalloc_internal(bool throttled)
{
FAR struct iob_s *iob;
#if CONFIG_IOB_THROTTLE > 0
int16_t count;
#endif
#if CONFIG_IOB_THROTTLE > 0
/* Select the count to check. */
count = (throttled ? g_throttle_count : g_iob_count);
#endif
/* We don't know what context we are called from so we use extreme measures
* to protect the free list: We disable interrupts very briefly.
*/
#if CONFIG_IOB_THROTTLE > 0
/* If there are free I/O buffers for this allocation */
if (count > 0)
#endif
{
/* Take the I/O buffer from the head of the free list */
iob = g_iob_freelist;
if (iob != NULL)
{
/* Remove the I/O buffer from the free list and decrement the
* counting semaphore(s) that tracks the number of available
* IOBs.
*/
g_iob_freelist = iob->io_flink;
/* Take a count. Note that we cannot do this in the orthodox way by
* calling nxsem_wait() or nxsem_trywait() because this function may
* be called from an interrupt handler. Fortunately we know that at
* least one buffer is free, so a simple decrement is all that is
* needed.
*/
g_iob_count--;
DEBUGASSERT(g_iob_count >= 0);
#if CONFIG_IOB_THROTTLE > 0
/* The throttle semaphore is used to throttle the number of
* free buffers that are available. It is used to prevent
* the overrunning of the free buffer list. Please note that
* it can only be decremented to zero, which indicates no
* throttled buffers are available.
*/
if (g_throttle_count > 0)
{
g_throttle_count--;
}
#endif
/* Put the I/O buffer in a known state */
iob->io_flink = NULL; /* Not in a chain */
iob->io_len = 0; /* Length of the data in the entry */
iob->io_offset = 0; /* Offset to the beginning of data */
iob->io_pktlen = 0; /* Total length of the packet */
return iob;
}
}
return NULL;
}
/****************************************************************************
* Name: iob_allocwait
*
@@ -113,6 +185,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
{
FAR struct iob_s *iob;
FAR volatile int16_t *count;
irqstate_t flags;
FAR sem_t *sem;
clock_t start;
@@ -121,8 +194,10 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
#if CONFIG_IOB_THROTTLE > 0
/* Select the semaphore count to check. */
count = (throttled ? &g_throttle_count : &g_iob_count);
sem = (throttled ? &g_throttle_sem : &g_iob_sem);
#else
count = &g_iob_count;
sem = &g_iob_sem;
#endif
@@ -132,15 +207,14 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
* we are waiting for I/O buffers to become free.
*/
flags = enter_critical_section();
flags = spin_lock_irqsave(&g_iob_lock);
/* Try to get an I/O buffer. If successful, the semaphore count will be
* decremented atomically.
*/
start = clock_systime_ticks();
iob = iob_tryalloc(throttled);
while (ret == OK && iob == NULL)
iob = iob_tryalloc_internal(throttled);
if (iob == NULL)
{
/* If not successful, then the semaphore count was less than or equal
* to zero (meaning that there are no free buffers). We need to wait
@@ -148,12 +222,17 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
* list.
*/
(*count)--;
spin_unlock_irqrestore(&g_iob_lock, flags);
if (timeout == UINT_MAX)
{
ret = nxsem_wait_uninterruptible(sem);
}
else
{
start = clock_systime_ticks();
ret = nxsem_tickwait_uninterruptible(sem,
iob_allocwait_gettimeout(start, timeout));
}
@@ -165,10 +244,13 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
*/
iob = iob_alloc_committed();
DEBUGASSERT(iob != NULL);
}
return iob;
}
leave_critical_section(flags);
spin_unlock_irqrestore(&g_iob_lock, flags);
return iob;
}
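
Condensed, the new iob_allocwait() control flow shown above is the following
(timeout handling and error paths trimmed; a sketch, not the literal code):

flags = spin_lock_irqsave(&g_iob_lock);

iob = iob_tryalloc_internal(throttled);   /* fast path, lock already held */
if (iob == NULL)
  {
    (*count)--;                            /* record this task as a waiter */
    spin_unlock_irqrestore(&g_iob_lock, flags);

    ret = nxsem_wait_uninterruptible(sem); /* sleep until iob_free() posts */
    if (ret >= 0)
      {
        iob = iob_alloc_committed();       /* the freed IOB was committed to us */
      }

    return iob;
  }

spin_unlock_irqrestore(&g_iob_lock, flags);
return iob;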
@@ -250,78 +332,15 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
{
FAR struct iob_s *iob;
irqstate_t flags;
#if CONFIG_IOB_THROTTLE > 0
FAR sem_t *sem;
#endif
#if CONFIG_IOB_THROTTLE > 0
/* Select the semaphore count to check. */
sem = (throttled ? &g_throttle_sem : &g_iob_sem);
#endif
/* We don't know what context we are called from so we use extreme measures
* to protect the free list: We disable interrupts very briefly.
*/
flags = enter_critical_section();
#if CONFIG_IOB_THROTTLE > 0
/* If there are free I/O buffers for this allocation */
if (sem->semcount > 0)
#endif
{
/* Take the I/O buffer from the head of the free list */
iob = g_iob_freelist;
if (iob != NULL)
{
/* Remove the I/O buffer from the free list and decrement the
* counting semaphore(s) that tracks the number of available
* IOBs.
*/
g_iob_freelist = iob->io_flink;
/* Take a semaphore count. Note that we cannot do this in the orthodox
* way by calling nxsem_wait() or nxsem_trywait() because this function
* may be called from an interrupt handler. Fortunately we know that at
* least one buffer is free, so a simple decrement is all that is
* needed.
*/
g_iob_sem.semcount--;
DEBUGASSERT(g_iob_sem.semcount >= 0);
#if CONFIG_IOB_THROTTLE > 0
/* The throttle semaphore is used to throttle the number of
* free buffers that are available. It is used to prevent
* the overrunning of the free buffer list. Please note that
* it can only be decremented to zero, which indicates no
* throttled buffers are available.
*/
if (g_throttle_sem.semcount > 0)
{
g_throttle_sem.semcount--;
}
#endif
leave_critical_section(flags);
/* Put the I/O buffer in a known state */
iob->io_flink = NULL; /* Not in a chain */
iob->io_len = 0; /* Length of the data in the entry */
iob->io_offset = 0; /* Offset to the beginning of data */
iob->io_pktlen = 0; /* Total length of the packet */
return iob;
}
}
leave_critical_section(flags);
return NULL;
flags = spin_lock_irqsave(&g_iob_lock);
iob = iob_tryalloc_internal(throttled);
spin_unlock_irqrestore(&g_iob_lock, flags);
return iob;
}
#ifdef CONFIG_IOB_ALLOC
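
With its body factored out into iob_tryalloc_internal(), iob_tryalloc() above
is now just lock, try, unlock, and it keeps its public contract: callable from
interrupt context, returning NULL when nothing is free. A typical call site,
shown here only for reference (hypothetical caller code):

FAR struct iob_s *iob = iob_tryalloc(false);  /* non-blocking, unthrottled */
if (iob == NULL)
  {
    /* No free I/O buffer right now; the caller decides whether to retry,
     * fall back to a blocking iob_alloc(), or drop the packet.
     */
  }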


@@ -59,7 +59,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
* to protect the committed list: We disable interrupts very briefly.
*/
flags = enter_critical_section();
flags = spin_lock_irqsave(&g_iob_lock);
/* Take the I/O buffer from the head of the committed list */
@@ -75,7 +75,43 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
iobq->qe_head = NULL; /* Nothing is contained */
}
leave_critical_section(flags);
spin_unlock_irqrestore(&g_iob_lock, flags);
return iobq;
}
static FAR struct iob_qentry_s *iob_tryalloc_qentry_internal(void)
{
FAR struct iob_qentry_s *iobq;
/* We don't know what context we are called from so we use extreme measures
* to protect the free list: We disable interrupts very briefly.
*/
iobq = g_iob_freeqlist;
if (iobq)
{
/* Remove the I/O buffer chain container from the free list and
* decrement the counting semaphore that tracks the number of free
* containers.
*/
g_iob_freeqlist = iobq->qe_flink;
/* Take a count. Note that we cannot do this in the orthodox way by
* calling nxsem_wait() or nxsem_trywait() because this function may
* be called from an interrupt handler. Fortunately we know that at
* least one container is free, so a simple decrement is all that is
* needed.
*/
g_qentry_count--;
DEBUGASSERT(g_qentry_count >= 0);
/* Put the I/O buffer in a known state */
iobq->qe_head = NULL; /* Nothing is contained */
}
return iobq;
}
@@ -101,14 +137,14 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
* re-enabled while we are waiting for I/O buffers to become free.
*/
flags = enter_critical_section();
flags = spin_lock_irqsave(&g_iob_lock);
/* Try to get an I/O buffer chain container. If successful, the semaphore
* count will be decremented atomically.
*/
qentry = iob_tryalloc_qentry();
while (ret == OK && qentry == NULL)
qentry = iob_tryalloc_qentry_internal();
if (qentry == NULL)
{
/* If not successful, then the semaphore count was less than or equal
* to zero (meaning that there are no free buffers). We need to wait
@@ -116,6 +152,8 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
* semaphore count will be incremented.
*/
g_qentry_count--;
spin_unlock_irqrestore(&g_iob_lock, flags);
ret = nxsem_wait_uninterruptible(&g_qentry_sem);
if (ret >= 0)
{
@@ -127,26 +165,13 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
qentry = iob_alloc_qcommitted();
DEBUGASSERT(qentry != NULL);
if (qentry == NULL)
{
/* This should not fail, but we allow for that possibility to
* handle any potential, non-obvious race condition. Perhaps
* the free IOB ended up in the g_iob_free list?
*
* We need to release our count so that it is available to
* iob_tryalloc(), perhaps allowing another thread to take our
* count. In that event, iob_tryalloc() will fail above and
* we will have to wait again.
*/
nxsem_post(&g_qentry_sem);
qentry = iob_tryalloc_qentry();
}
}
return qentry;
}
leave_critical_section(flags);
spin_unlock_irqrestore(&g_iob_lock, flags);
return qentry;
}
@@ -201,33 +226,9 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
* to protect the free list: We disable interrupts very briefly.
*/
flags = enter_critical_section();
iobq = g_iob_freeqlist;
if (iobq)
{
/* Remove the I/O buffer chain container from the free list and
* decrement the counting semaphore that tracks the number of free
* containers.
*/
g_iob_freeqlist = iobq->qe_flink;
/* Take a semaphore count. Note that we cannot do this in the orthodox
* way by calling nxsem_wait() or nxsem_trywait() because this function
* may be called from an interrupt handler. Fortunately we know that at
* least one container is free, so a simple decrement is all that is
* needed.
*/
g_qentry_sem.semcount--;
DEBUGASSERT(g_qentry_sem.semcount >= 0);
/* Put the I/O buffer in a known state */
iobq->qe_head = NULL; /* Nothing is contained */
}
leave_critical_section(flags);
flags = spin_lock_irqsave(&g_iob_lock);
iobq = iob_tryalloc_qentry_internal();
spin_unlock_irqrestore(&g_iob_lock, flags);
return iobq;
}


@@ -83,9 +83,6 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
#ifdef CONFIG_IOB_NOTIFIER
int16_t navail;
#endif
#if CONFIG_IOB_THROTTLE > 0
bool committed_thottled = false;
#endif
iobinfo("iob=%p io_pktlen=%u io_len=%u next=%p\n",
iob, iob->io_pktlen, iob->io_len, next);
@@ -135,7 +132,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
* interrupts very briefly.
*/
flags = enter_critical_section();
flags = spin_lock_irqsave(&g_iob_lock);
/* Which list? If there is a task waiting for an IOB, then put
* the IOB on either the free list or on the committed list where
@@ -145,80 +142,58 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
*/
#if CONFIG_IOB_THROTTLE > 0
if ((g_iob_sem.semcount < 0) ||
((g_iob_sem.semcount >= CONFIG_IOB_THROTTLE) &&
(g_throttle_sem.semcount < 0)))
if ((g_iob_count < 0) ||
((g_iob_count >= CONFIG_IOB_THROTTLE) &&
(g_throttle_count < 0)))
#else
if (g_iob_sem.semcount < 0)
if (g_iob_count < 0)
#endif
{
FAR sem_t *sem;
iob->io_flink = g_iob_committed;
g_iob_committed = iob;
#if CONFIG_IOB_THROTTLE > 0
if ((g_iob_sem.semcount >= CONFIG_IOB_THROTTLE) &&
(g_throttle_sem.semcount < 0))
if (g_iob_count < 0)
{
committed_thottled = true;
g_iob_count++;
sem = &g_iob_sem;
}
else
{
g_throttle_count++;
sem = &g_throttle_sem;
}
#else
g_iob_count++;
sem = &g_iob_sem;
#endif
spin_unlock_irqrestore(&g_iob_lock, flags);
nxsem_post(sem);
}
else
{
g_iob_count++;
#if CONFIG_IOB_THROTTLE > 0
if (g_iob_count > CONFIG_IOB_THROTTLE)
{
g_throttle_count++;
}
#endif
iob->io_flink = g_iob_freelist;
g_iob_freelist = iob;
spin_unlock_irqrestore(&g_iob_lock, flags);
}
leave_critical_section(flags);
/* Signal that an IOB is available. This is done with schedule locked
* to make sure that both g_iob_sem and g_throttle_sem are incremented
* together (if applicable). After the schedule is unlocked, if there
* is a thread blocked, waiting for an IOB, this will wake up exactly
* one thread. The semaphore count will correctly indicate that the
* awakened task owns an IOB and should find it in the committed list.
*/
sched_lock();
nxsem_post(&g_iob_sem);
DEBUGASSERT(g_iob_sem.semcount <= CONFIG_IOB_NBUFFERS);
DEBUGASSERT(g_iob_count <= CONFIG_IOB_NBUFFERS);
#if CONFIG_IOB_THROTTLE > 0
flags = enter_critical_section();
if (g_iob_sem.semcount > CONFIG_IOB_THROTTLE)
{
/* If posting to the throttled semaphore is going to awaken a
* waiting task, then the g_iob_sem count should be decremented
* because an I/O buffer (from the head of the g_iob_committed list)
* will be allocated to this waiting task.
* Decrementing the g_throttled_sem (when posting to the g_iob_sem)
* is not necessary because this condition can only occur when the
* g_throttled_sem is less or equal to zero. On the other hand, if
* the g_iob_sem is greater than the CONFIG_IOB_THROTTLE and there
* is a waiting thread, then the I/O buffer just freed will be
* committed to a waiting task and is not available for general use.
*/
if (committed_thottled)
{
g_iob_sem.semcount--;
}
leave_critical_section(flags);
nxsem_post(&g_throttle_sem);
DEBUGASSERT(g_throttle_sem.semcount <=
DEBUGASSERT(g_throttle_count <=
(CONFIG_IOB_NBUFFERS - CONFIG_IOB_THROTTLE));
}
else
{
leave_critical_section(flags);
}
#endif
sched_unlock();
#ifdef CONFIG_IOB_NOTIFIER
/* Check if the IOB was claimed by a thread that is blocked waiting
* for an IOB.
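
Pairing with the wait path, the new iob_free() logic above boils down to the
following outline (throttle bookkeeping simplified; a summary of the hunk, not
the literal code):

flags = spin_lock_irqsave(&g_iob_lock);

if (g_iob_count < 0)                       /* a task is blocked waiting for an IOB */
  {
    iob->io_flink   = g_iob_committed;     /* hand the buffer straight to the waiter */
    g_iob_committed = iob;
    g_iob_count++;
    spin_unlock_irqrestore(&g_iob_lock, flags);
    nxsem_post(&g_iob_sem);                /* wake exactly one waiter */
  }
else
  {
    g_iob_count++;                         /* no waiter: back onto the free list */
    iob->io_flink  = g_iob_freelist;
    g_iob_freelist = iob;
    spin_unlock_irqrestore(&g_iob_lock, flags);
  }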


@@ -60,7 +60,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
* interrupts very briefly.
*/
flags = enter_critical_section();
flags = spin_lock_irqsave(&g_iob_lock);
/* Which list? If there is a task waiting for an IOB chain, then put
* the IOB chain on either the free list or on the committed list where
@@ -68,27 +68,22 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
* iob_tryalloc_qentry()).
*/
if (g_qentry_sem.semcount < 0)
if (g_qentry_count < 0)
{
iobq->qe_flink = g_iob_qcommitted;
g_iob_qcommitted = iobq;
g_qentry_count++;
spin_unlock_irqrestore(&g_iob_lock, flags);
nxsem_post(&g_qentry_sem);
}
else
{
g_qentry_count++;
iobq->qe_flink = g_iob_freeqlist;
g_iob_freeqlist = iobq;
spin_unlock_irqrestore(&g_iob_lock, flags);
}
/* Signal that an I/O buffer chain container is available. If there
* is a thread waiting for an I/O buffer chain container, this will
* wake up exactly one thread. The semaphore count will correctly
* indicate that the awakened task owns an I/O buffer chain container
* and should find it in the committed list.
*/
nxsem_post(&g_qentry_sem);
leave_critical_section(flags);
/* And return the I/O buffer chain container after the one that was freed */
return nextq;


@@ -53,7 +53,6 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
FAR struct iob_qentry_s *prev = NULL;
FAR struct iob_qentry_s *qentry;
irqstate_t flags = enter_critical_section();
for (qentry = iobq->qh_head; qentry != NULL;
prev = qentry, qentry = qentry->qe_flink)
{
@@ -86,8 +85,6 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
break;
}
}
leave_critical_section(flags);
}
#endif /* CONFIG_IOB_NCHAINS > 0 */


@@ -91,23 +91,32 @@ FAR struct iob_qentry_s *g_iob_freeqlist;
FAR struct iob_qentry_s *g_iob_qcommitted;
#endif
sem_t g_iob_sem = SEM_INITIALIZER(0);
/* Counting semaphores that track the number of free IOBs/qentries */
sem_t g_iob_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS);
volatile int16_t g_iob_count = CONFIG_IOB_NBUFFERS;
#if CONFIG_IOB_THROTTLE > 0
sem_t g_throttle_sem = SEM_INITIALIZER(0);
/* Counts available I/O buffers when throttled */
sem_t g_throttle_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS -
CONFIG_IOB_THROTTLE);
volatile int16_t g_throttle_count = CONFIG_IOB_NBUFFERS -
CONFIG_IOB_THROTTLE;
#endif
#if CONFIG_IOB_NCHAINS > 0
sem_t g_qentry_sem = SEM_INITIALIZER(0);
/* Counts free I/O buffer queue containers */
sem_t g_qentry_sem = SEM_INITIALIZER(CONFIG_IOB_NCHAINS);
volatile int16_t g_qentry_count = CONFIG_IOB_NCHAINS;
#endif
volatile spinlock_t g_iob_lock = SP_UNLOCKED;
/****************************************************************************
* Public Functions
****************************************************************************/


@@ -46,34 +46,27 @@
int iob_navail(bool throttled)
{
int navail = 0;
int ret;
#if CONFIG_IOB_NBUFFERS > 0
/* Get the value of the IOB counting semaphores */
ret = nxsem_get_value(&g_iob_sem, &navail);
if (ret >= 0)
{
ret = navail;
ret = g_iob_count;
#if CONFIG_IOB_THROTTLE > 0
/* Subtract the throttle value if so requested */
/* Subtract the throttle value if so requested */
if (throttled)
{
ret -= CONFIG_IOB_THROTTLE;
}
if (throttled)
{
ret -= CONFIG_IOB_THROTTLE;
}
#endif
if (ret < 0)
{
ret = 0;
}
if (ret < 0)
{
ret = 0;
}
#else
ret = navail;
ret = 0;
#endif
return ret;
@@ -89,24 +82,18 @@ int iob_navail(bool throttled)
int iob_qentry_navail(void)
{
int navail = 0;
int ret;
#if CONFIG_IOB_NCHAINS > 0
/* Get the value of the IOB chain qentry counting semaphores */
ret = nxsem_get_value(&g_qentry_sem, &navail);
if (ret >= 0)
ret = g_qentry_count;
if (ret < 0)
{
ret = navail;
if (ret < 0)
{
ret = 0;
}
ret = 0;
}
#else
ret = navail;
ret = 0;
#endif
return ret;


@@ -58,7 +58,6 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
/* Remove the I/O buffer chain from the head of the queue */
irqstate_t flags = enter_critical_section();
qentry = iobq->qh_head;
if (qentry)
{
@@ -76,7 +75,6 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
iob_free_qentry(qentry);
}
leave_critical_section(flags);
return iob;
}


@@ -56,7 +56,7 @@ void iob_getstats(FAR struct iob_stats_s *stats)
{
stats->ntotal = CONFIG_IOB_NBUFFERS;
nxsem_get_value(&g_iob_sem, &stats->nfree);
stats->nfree = g_iob_count;
if (stats->nfree < 0)
{
stats->nwait = -stats->nfree;
@@ -68,7 +68,7 @@ void iob_getstats(FAR struct iob_stats_s *stats)
}
#if CONFIG_IOB_THROTTLE > 0
nxsem_get_value(&g_throttle_sem, &stats->nthrottle);
stats->nthrottle = g_throttle_count;
if (stats->nthrottle < 0)
{
stats->nthrottle = -stats->nthrottle;